Merge branch 'dev' into rp/docs-v0.6.9

Ramon Perez committed 2025-08-28 21:47:25 +10:00 · commit 798cae28a9
28 changed files with 623 additions and 414 deletions

LICENSE

@@ -1,201 +1,19 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Jan
Copyright 2025 Menlo Research
This product includes software developed by Menlo Research (https://menlo.ai).
Copyright 2025 Menlo Research Pte. Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Attribution is requested in user-facing documentation and materials, where appropriate.

autoqa/checklist.md (new file)

@@ -0,0 +1,264 @@
# I. Before release
## A. Initial update / migration data check
Before testing, set up the following in the old version so that we can verify the data is properly migrated:
- [ ] Change the appearance / theme to something that is obviously different from the default set-up
- [ ] Ensure there are a few chat threads
- [ ] Ensure there are a few favourite / starred threads
- [ ] Ensure there are 2 models downloaded
- [ ] Ensure there are 2 imports on the local provider (llama.cpp)
- [ ] Modify the MCP servers list and add some ENV values to MCP servers
- [ ] Modify the Local API Server
- [ ] Set an HTTPS proxy config value
- [ ] Add 2 custom assistants to Jan
- [ ] Create a new chat with a custom assistant
- [ ] Change the `App Data` location to some other folder
- [ ] Create a Custom Provider
- [ ] Disable some model providers
- [ ] [NEW] Change the llama.cpp settings of 2 models
#### Validate that the update does not corrupt existing user data or settings (before and after the update, the same information is shown):
- [ ] Threads
- [ ] Previously used models and assistants are shown correctly
- [ ] Can resume chat in threads with the previous context
- [ ] Assistants
- Settings:
- [ ] Appearance
- [ ] MCP Servers
- [ ] Local API Server
- [ ] HTTPS Proxy
- [ ] Custom Provider Set-up
#### In `Hub`:
- [ ] Can see models from HF listed properly
- [ ] Downloaded models show `Use` instead of `Download`
- [ ] Toggling `Downloaded` in the right corner shows the correct list of downloaded models
#### In `Settings -> General`:
- [ ] Ensure the `App Data` path is the same
- [ ] Click `Open Logs`; the App Log should appear
#### In `Settings -> Model Providers`:
- [ ] Llama.cpp still lists downloaded models, and the user can chat with them
- [ ] Llama.cpp still lists imported models, and the user can chat with them
- [ ] Remote providers still retain previously set-up API keys, and the user can chat with models from the provider without having to re-enter API keys
- [ ] Enabled and disabled Model Providers stay the same as before the update
#### In `Settings -> Extensions`, check that the following exist:
- [ ] Conversational
- [ ] Jan Assistant
- [ ] Download Manager
- [ ] llama.cpp Inference Engine
## B. `Settings`
#### In `General`:
- [ ] Ensure `Community` links work and point to the correct websites
- [ ] Ensure the `Check for Updates` function detects the correct latest version
- [ ] [ENG] Create a folder with non-standard characters in its title (e.g. Chinese characters) => change the `App Data` location to that folder => test that models are still able to load and run properly.
#### In `Appearance`:
- [ ] Toggle between different `Theme` options to check that they change accordingly and that all elements of the UI are legible with the right contrast:
- [ ] Light
- [ ] Dark
- [ ] System (should follow your OS system settings)
- [ ] Change the following values => close the application => re-open the application => ensure that the changes persist across sessions:
- [ ] Theme
- [ ] Font Size
- [ ] Window Background
- [ ] App Main View
- [ ] Primary
- [ ] Accent
- [ ] Destructive
- [ ] Chat Width
- [ ] Ensure that changing this value does not break the UI
- [ ] Code Block
- [ ] Show Line Numbers
- [ENG] Ensure that clicking `Reset` in the `Appearance` section resets it back to the default values
- [ENG] Ensure that clicking `Reset` in the `Code Block` section resets it back to the default values
#### In `Model Providers`:
In `Llama.cpp`:
- [ ] After downloading a model from the hub, the model is listed with the correct name under `Models`
- [ ] Can import a `gguf` model with no errors
- [ ] Imported models are listed with the correct name under `Models`
- [ ] Check that clicking `delete` removes the model from the list
- [ ] Deleted models don't appear in the selectable models section in the chat input (even in old threads that previously used the model)
- [ ] Ensure that the user can re-import deleted imported models
- [ ] Enable `Auto-Unload Old Models` and ensure that only one model can run / start at a time. If two models are running when the setting is enabled, both of them will be stopped.
- [ ] Disable `Auto-Unload Old Models` and ensure that multiple models can run at the same time.
- [ ] Enable `Context Shift` and ensure that context can run for a long time without hitting a memory error. Use the `banana test`: turn on the fetch MCP => ask a local model to fetch and summarize the history of the banana (the banana has a very long history on Wikipedia, it turns out). It should run out of context memory fairly quickly if `Context Shift` is not enabled.
- [ ] Ensure that the user can change the Jinja chat template of an individual model without affecting the templates of other models
- [ ] Ensure that there is a recommended `llama.cpp` backend for each system and that it works out of the box for users.
- [ ] [0.6.9] Take a `gguf` file and delete the `.gguf` extension from the file name, import it into Jan, and verify that it works.
In Remote Model Providers:
- [ ] Check that the following providers are present:
- [ ] OpenAI
- [ ] Anthropic
- [ ] Cohere
- [ ] OpenRouter
- [ ] Mistral
- [ ] Groq
- [ ] Gemini
- [ ] Hugging Face
- [ ] Models should appear as available in the selectable dropdown in the chat input once some value is entered in the API key field (it could even be a wrong API key).
- [ ] Once a valid API key is used, the user can select a model from that provider and chat without any error.
- [ ] Delete a model and ensure that it doesn't show up in the `Models` list view or in the selectable dropdown in the chat input.
- [ ] Ensure that a deleted model is also not selectable and does not appear in old threads that used it.
- [ ] Manually adding a new model works, and the user can chat with the newly added model without error (you can add back the model you just deleted for testing).
- [ ] [0.6.9] Make sure that Ollama set up as a custom provider works with Jan (see the sketch below)
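
A quick way to sanity-check the Ollama item above before configuring Jan: Ollama serves an OpenAI-compatible API at `http://localhost:11434/v1` by default, and the API key is an arbitrary placeholder (Ollama ignores it, though the provider form may require a non-empty value). A minimal reachability sketch, with these assumptions:

```typescript
// Verify Ollama's OpenAI-compatible endpoint is reachable before pointing
// a Jan custom provider at it. The base URL is Ollama's default; adjust it
// if you changed OLLAMA_HOST. The bearer token is a placeholder.
const base = 'http://localhost:11434/v1'

const res = await fetch(`${base}/models`, {
  headers: { Authorization: 'Bearer ollama' },
})
if (!res.ok) throw new Error(`Ollama not reachable: ${res.status}`)
console.log(await res.json()) // should list your locally pulled models
```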
In Custom Providers:
- [ ] Ensure that the user can create a new custom provider with the right base URL and API key.
- [ ] Clicking `Refresh` should retrieve the list of available models from the custom provider.
- [ ] The user can chat with the custom provider's models
- [ ] Ensure that custom providers can be deleted and won't reappear in a new session
In general:
- [ ] A disabled Model Provider should not show up as selectable in the chat input of new and old threads alike (an old thread's chat input should show `Select Model` instead of the disabled model)
#### In `Shortcuts`:
Make sure the following shortcut key combos are visible and work:
- [ ] New chat
- [ ] Toggle Sidebar
- [ ] Zoom In
- [ ] Zoom Out
- [ ] Send Message
- [ ] New Line
- [ ] Navigation
#### In `Hardware`:
Ensure that the following hardware information shows up:
- [ ] Operating System
- [ ] CPU
- [ ] Memory
- [ ] GPU (If the machine has one)
- [ ] Enable and disable GPUs, and ensure that models still run correctly in both modes
- [ ] Enabling or disabling a GPU should not affect the UI of the application
#### In `MCP Servers`:
- [ ] Ensure that a user can create an MCP server successfully when entering the correct information
- [ ] Ensure that `Env` values are masked with `*` in the quick view.
- [ ] If an `Env` value is missing, an error pop-up should appear.
- [ ] Ensure that a deleted MCP server disappears from the `MCP Servers` list without any error
- [ ] Ensure that before an MCP server is deleted, it disables itself first and won't appear in the tool list after deletion.
- [ ] Ensure that when the content of an MCP server is edited, the changes are reflected accordingly in the UI and when running it.
- [ ] Enabling and disabling an MCP server works properly
- [ ] A disabled MCP should not appear in the available tool list in the chat input
- [ ] A disabled MCP should not be callable even when the model is force-prompted to use it (ensure there is no ghost MCP server)
- [ ] Ensure that enabled MCP servers start automatically when the application starts
- [ ] An enabled MCP should show its functions in the available tool list
- [ ] The user can use a model to call different tools from multiple enabled MCP servers in the same thread
- [ ] If `Allow All MCP Tool Permissions` is disabled, in every new thread, before a tool is called, a confirmation dialog should pop up to confirm the action.
- [ ] When the user clicks `Deny`, the tool call is not executed, and a message indicating so is returned in the tool call result.
- [ ] When the user clicks `Allow Once` in the pop-up, a confirmation dialog will appear again the next time the tool is called.
- [ ] When the user clicks `Always Allow` in the pop-up, the tool retains permission and won't ask for confirmation again. (This applies at the individual tool level, not at the MCP server level.)
- [ ] If `Allow All MCP Tool Permissions` is enabled, in every new thread, no confirmation dialog should pop up when a tool is called.
- [ ] When the pop-up appears, make sure that the `Tool Parameters` are also shown in detail in the pop-up.
- [ ] [0.6.9] Go to `Enter JSON configuration` when creating a new MCP server => paste the JSON config inside (see the example after this list) => click `Save` => the server works
- [ ] [0.6.9] If an individual JSON config is malformed, the MCP server should not be activated
- [ ] [0.6.9] Make sure that MCP servers can be used with the streamable-http transport => connect to Smithery and test an MCP server
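
For the JSON configuration step above, here is a sketch of the kind of config to paste. It follows the common `mcpServers` convention used by MCP clients; the server name, command, and env key are illustrative, and the exact schema Jan accepts may differ:

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": ["mcp-server-fetch"],
      "env": {
        "EXAMPLE_TOKEN": "replace-me"
      }
    }
  }
}
```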
#### In `Local API Server`:
- [ ] The user can `Start Server` and chat with the default endpoint
- [ ] The user should see the correct model name at `v1/models`
- [ ] The user should be able to chat with it at `v1/chat/completions` (see the sketch after this list)
- [ ] `Open Logs` shows the correct query logs sent to and returned from the server
- [ ] Make sure that changes to every parameter in `Server Configuration` are reflected when you `Start Server`
- [ ] [0.6.9] With the startup configuration, the last used model also starts automatically (users do not have to manually start a model before starting the server)
- [ ] [0.6.9] Make sure that you can send an image to the Local API Server and it works (you can set up the Local API Server as a Custom Provider in Jan to test)
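
A minimal smoke test for the `v1/models` and `v1/chat/completions` checks above. The host, port, and API key below are placeholders for whatever you set in `Server Configuration`, not guaranteed defaults:

```typescript
// Smoke-test the Local API Server. Replace the base URL and key with the
// values from Settings -> Local API Server -> Server Configuration.
const base = 'http://127.0.0.1:1337/v1' // assumed host/port, adjust to match
const headers = {
  'Content-Type': 'application/json',
  Authorization: 'Bearer <your-api-key>',
}

// 1) The started model should be listed at /models
const models = await (await fetch(`${base}/models`, { headers })).json()
console.log(models)

// 2) A chat completion against that model should return a reply
const chat = await fetch(`${base}/chat/completions`, {
  method: 'POST',
  headers,
  body: JSON.stringify({
    model: models.data?.[0]?.id, // first model id from step 1
    messages: [{ role: 'user', content: 'Say hello in one word.' }],
  }),
})
console.log(await chat.json())
```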
#### In `HTTPS Proxy`:
- [ ] Model download requests go through the proxy endpoint
## C. Hub
- [ ] The user can click `Download` to download a model
- [ ] The user can cancel a model download while it is in progress
- [ ] The user can add a Hugging Face model to the list by pasting a model name / model URL into the search bar and pressing enter
- [ ] Clicking on a listing opens the model card information within Jan and renders the HTML properly
- [ ] Clicking download works in the `Show variants` section
- [ ] Clicking download works inside the model card HTML
- [ ] [0.6.9] Check that the model recommendations based on user hardware work as expected in the Model Hub
## D. Threads
#### In the left bar:
- [ ] The user can delete an old thread, and it won't reappear even after the app restarts
- [ ] Changing the title of a thread should update its last modification date and re-organise its position into the correct chronological order in the left bar.
- [ ] The title of a new thread is the first message from the user.
- [ ] Users can star / un-star threads accordingly
- [ ] Starred threads should move to the `Favourite` section, and other threads should stay in `Recent`
- [ ] Ensure that the thread search feature returns accurate results based on thread titles and contents (including from both `Favourite` and `Recent`)
- [ ] `Delete All` should delete only threads in the `Recents` section
- [ ] `Unstar All` should un-star all of the `Favourites` threads and return them to `Recent`
#### In a thread:
- [ ] When `New Chat` is clicked, the assistant is set to the last selected assistant, the model is set to the last used model, and the user can immediately chat with the model.
- [ ] The user can conduct a multi-turn conversation in a single thread without loss of data (given that `Context Shift` is not enabled)
- [ ] The user can change to a different model in the middle of a conversation in a thread, and the model works.
- [ ] The user can click the `Regenerate` button on a message returned from the model to get a new response based on the previous context.
- [ ] The user can change the `Assistant` in the middle of a conversation in a thread, and the new assistant's settings are applied instead.
- [ ] The chat window can render and show all the content of a selected thread (including scrolling up and down in long threads)
- [ ] Old threads retain their settings as of the last update / usage
- [ ] Assistant option
- [ ] Model option (except if the model / model provider has been deleted or disabled)
- [ ] The user can send messages with different types of text content (e.g. text, emoji, ...)
- [ ] When the model is asked to generate a markdown table, the returned table is correctly formatted.
- [ ] When the model generates code, ensure that the code snippets are properly formatted according to the `Appearance -> Code Block` settings.
- [ ] Users can edit an old message, and the answer can be regenerated based on the new message
- [ ] The user can click `Copy` to copy the model response
- [ ] The user can click `Delete` to delete either a user message or a model response.
- [ ] The token speed appears while a response from the model is being generated, and the final value is shown under the response.
- [ ] Make sure that when users type Chinese or Japanese characters with an IME keyboard and press `Enter`, the `Send` button doesn't trigger automatically after each word.
- [ ] [0.6.9] Attach an image to the chat input and check that you can chat with it using a remote model
- [ ] [0.6.9] Attach an image to the chat input and check that you can chat with it using a local model
- [ ] [0.6.9] Check that you can paste an image into the text box from your system clipboard (copy - paste)
- [ ] [0.6.9] Make sure that the user can favourite a model in the model selection in the chat input
## E. Assistants
- [ ] There is always at least one default Assistant, which is Jan
- [ ] The default Jan assistant has `stream = True` by default
- [ ] The user can create / edit an assistant with different parameter and instruction choices.
- [ ] When the user deletes the default Assistant, the next Assistant in line becomes the default Assistant, and its settings apply to new chats accordingly.
- [ ] The user can create / edit an assistant from within a chat window (at the top left)
## F. After checking everything else
In `Settings -> General`:
- [ ] Change the location of the `App Data` to some other path that is not the default path
- [ ] Click the `Reset` button in `Other` to factory-reset the app:
- [ ] All threads are deleted
- [ ] All Assistants are deleted except for the default Jan Assistant
- [ ] The `App Data` location is reset back to the default path
- [ ] Appearance reset
- [ ] Model Providers information all reset
- [ ] Llama.cpp setting reset
- [ ] API keys cleared
- [ ] All Custom Providers deleted
- [ ] MCP Servers reset
- [ ] Local API Server reset
- [ ] HTTPS Proxy reset
- [ ] After closing the app, all models are unloaded properly
- [ ] Navigate to the data folder using the `App Data` path information => delete the folder => reopen the app to check that the folder is re-created with all the necessary data.
- [ ] Ensure that the uninstallation process removes the app successfully from the system.
## G. New App Installation
- [ ] Clean up by deleting all the leftover folders created by Jan
- [ ] On MacOS
- [ ] `~/Library/Application Support/Jan`
- [ ] `~/Library/Caches/jan.ai.app`
- [ ] On Windows
- [ ] `C:\Users\<Username>\AppData\Roaming\Jan\`
- [ ] `C:\Users\<Username>\AppData\Local\jan.ai.app`
- [ ] On Linux
- [ ] `~/.cache/Jan`
- [ ] `~/.cache/jan.ai.app`
- [ ] `~/.local/share/Jan`
- [ ] `~/.local/share/jan.ai.app`
- [ ] Ensure that a fresh install of Jan launches
- [ ] Do some basic checks to see that all functions still behave as expected. To be extra careful, you can go through the whole list again; however, it is usually enough to verify that core functionality like `Threads` and `Model Providers` works as intended.
# II. After release
- [ ] Check that the App Updater works and the user can update to the latest release without any problems
- [ ] The app restarts after the user finishes an update
- [ ] Repeat section `A. Initial update / migration data check` above to verify that the update is done correctly on the live version

View File

@@ -16,7 +16,7 @@
     "description": "Environmental variables for llama.cpp(KEY=VALUE), separated by ';'",
     "controllerType": "input",
     "controllerProps": {
-      "value": "none",
+      "value": "",
       "placeholder": "Eg. GGML_VK_VISIBLE_DEVICES=0,1",
       "type": "text",
       "textAlign": "right"

View File

@@ -1084,7 +1084,7 @@ export default class llamacpp_extension extends AIEngine {
       // The downloadFiles function only returns successfully if all files downloaded AND validated
       events.emit(DownloadEvent.onFileDownloadAndVerificationSuccess, {
         modelId,
-        downloadType: 'Model'
+        downloadType: 'Model',
       })
     } catch (error) {
       logger.error('Error downloading model:', modelId, opts, error)
@@ -1092,7 +1092,8 @@ export default class llamacpp_extension extends AIEngine {
         error instanceof Error ? error.message : String(error)
       // Check if this is a cancellation
-      const isCancellationError = errorMessage.includes('Download cancelled') ||
+      const isCancellationError =
+        errorMessage.includes('Download cancelled') ||
         errorMessage.includes('Validation cancelled') ||
         errorMessage.includes('Hash computation cancelled') ||
         errorMessage.includes('cancelled') ||
@@ -1372,7 +1373,7 @@ export default class llamacpp_extension extends AIEngine {
     envs['LLAMA_API_KEY'] = api_key
     // set user envs
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)

     // model option is required
     // NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
@@ -1751,7 +1752,7 @@ export default class llamacpp_extension extends AIEngine {
     }
     // set envs
     const envs: Record<string, string> = {}
-    this.parseEnvFromString(envs, this.llamacpp_env)
+    if (this.llamacpp_env) this.parseEnvFromString(envs, this.llamacpp_env)

     // Ensure backend is downloaded and ready before proceeding
     await this.ensureBackendReady(backend, version)
@@ -1767,7 +1768,7 @@ export default class llamacpp_extension extends AIEngine {
       return dList
     } catch (error) {
       logger.error('Failed to query devices:\n', error)
-      throw new Error("Failed to load llamacpp backend")
+      throw new Error('Failed to load llamacpp backend')
     }
   }
@@ -1876,7 +1877,7 @@ export default class llamacpp_extension extends AIEngine {
       logger.info(
         `Using explicit key_length: ${keyLen}, value_length: ${valLen}`
       )
-      headDim = (keyLen + valLen)
+      headDim = keyLen + valLen
     } else {
       // Fall back to embedding_length estimation
       const embeddingLen = Number(meta[`${arch}.embedding_length`])

View File

@@ -18,11 +18,11 @@
     "test:prepare": "yarn build:icon && yarn copy:assets:tauri && yarn build --no-bundle ",
     "dev:web": "yarn workspace @janhq/web-app dev",
     "dev:tauri": "yarn build:icon && yarn copy:assets:tauri && cross-env IS_CLEAN=true tauri dev",
-    "copy:assets:tauri": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\"",
+    "copy:assets:tauri": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\" && cpx \"LICENSE\" \"src-tauri/resources/\"",
     "download:lib": "node ./scripts/download-lib.mjs",
     "download:bin": "node ./scripts/download-bin.mjs",
     "build:tauri:win32": "yarn download:bin && yarn tauri build",
-    "build:tauri:linux": "yarn download:bin && ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",
+    "build:tauri:linux": "yarn download:bin && NO_STRIP=1 ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build --verbose && ./src-tauri/build-utils/buildAppImage.sh",
     "build:tauri:darwin": "yarn tauri build --target universal-apple-darwin",
     "build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os",
     "build:tauri:plugin:api": "cd src-tauri/plugins && yarn install && yarn workspaces foreach -Apt run build",

View File

@@ -9,6 +9,11 @@
     "core:window:allow-set-theme",
     "log:default",
     "core:webview:allow-create-webview-window",
-    "core:window:allow-set-focus"
+    "core:window:allow-set-focus",
+    "hardware:allow-get-system-info",
+    "hardware:allow-get-system-usage",
+    "llamacpp:allow-get-devices",
+    "llamacpp:allow-read-gguf-metadata",
+    "deep-link:allow-get-current"
   ]
 }

View File

@@ -698,6 +698,7 @@ Section Install
   CreateDirectory "$INSTDIR\resources\pre-install"
   SetOutPath $INSTDIR
   File /a "/oname=vulkan-1.dll" "D:\a\jan\jan\src-tauri\resources\lib\vulkan-1.dll"
+  File /a "/oname=LICENSE" "D:\a\jan\jan\src-tauri\resources\LICENSE"
   SetOutPath "$INSTDIR\resources\pre-install"
   File /nonfatal /a /r "D:\a\jan\jan\src-tauri\resources\pre-install\"
   SetOutPath $INSTDIR
@@ -821,6 +822,9 @@ Section Uninstall
   ; Copy main executable
   Delete "$INSTDIR\${MAINBINARYNAME}.exe"
+  ; Delete LICENSE file
+  Delete "$INSTDIR\LICENSE"
+
   ; Delete resources
   Delete "$INSTDIR\resources\pre-install\janhq-assistant-extension-1.0.2.tgz"
   Delete "$INSTDIR\resources\pre-install\janhq-conversational-extension-1.0.0.tgz"

View File

@@ -84,6 +84,7 @@
       "icons/128x128@2x.png",
       "icons/icon.icns",
       "icons/icon.ico"
-    ]
+    ],
+    "resources": ["resources/LICENSE"]
   }
 }

View File

@@ -1,12 +1,13 @@
 {
   "bundle": {
     "targets": ["deb", "appimage"],
-    "resources": ["resources/pre-install/**/*"],
+    "resources": ["resources/pre-install/**/*", "resources/LICENSE"],
     "externalBin": ["resources/bin/uv"],
     "linux": {
       "appimage": {
         "bundleMediaFramework": false,
-        "files": {}
+        "files": {
+        }
       },
       "deb": {
         "files": {

View File

@@ -1,7 +1,7 @@
 {
   "bundle": {
     "targets": ["app", "dmg"],
-    "resources": ["resources/pre-install/**/*"],
+    "resources": ["resources/pre-install/**/*", "resources/LICENSE"],
     "externalBin": ["resources/bin/bun", "resources/bin/uv"]
   }
 }

View File

@@ -6,7 +6,14 @@ import { cn } from '@/lib/utils'
 function HoverCard({
   ...props
 }: React.ComponentProps<typeof HoverCardPrimitive.Root>) {
-  return <HoverCardPrimitive.Root data-slot="hover-card" {...props} />
+  return (
+    <HoverCardPrimitive.Root
+      openDelay={0}
+      closeDelay={0}
+      data-slot="hover-card"
+      {...props}
+    />
+  )
 }

 function HoverCardTrigger({

View File

@@ -107,9 +107,15 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
         if (selectedProvider === 'llamacpp') {
           const hasLocalMmproj = await checkMmprojExists(selectedModel.id)
           setHasMmproj(hasLocalMmproj)
-        } else {
-          // For non-llamacpp providers, only check vision capability
+        }
+        // For non-llamacpp providers, only check vision capability
+        else if (
+          selectedProvider !== 'llamacpp' &&
+          selectedModel?.capabilities?.includes('vision')
+        ) {
           setHasMmproj(true)
+        } else {
+          setHasMmproj(false)
         }
       } catch (error) {
         console.error('Error checking mmproj:', error)
@@ -119,7 +125,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
     }
     checkMmprojSupport()
-  }, [selectedModel?.id, selectedProvider])
+  }, [selectedModel?.capabilities, selectedModel?.id, selectedProvider])

   // Check if there are active MCP servers
   const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0
@@ -368,34 +374,89 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
     }
   }

-  const handlePaste = (e: React.ClipboardEvent) => {
-    const clipboardItems = e.clipboardData?.items
-    if (!clipboardItems) return
-
+  const handlePaste = async (e: React.ClipboardEvent) => {
     // Only allow paste if model supports mmproj
     if (!hasMmproj) {
       return
     }

-    const imageItems = Array.from(clipboardItems).filter((item) =>
-      item.type.startsWith('image/')
-    )
+    const clipboardItems = e.clipboardData?.items
+    let hasProcessedImage = false

-    if (imageItems.length > 0) {
-      e.preventDefault()
-      const files: File[] = []
-      let processedCount = 0
+    // Try clipboardData.items first (traditional method)
+    if (clipboardItems && clipboardItems.length > 0) {
+      const imageItems = Array.from(clipboardItems).filter((item) =>
+        item.type.startsWith('image/')
+      )

-      imageItems.forEach((item) => {
-        const file = item.getAsFile()
-        if (file) {
-          files.push(file)
-        }
-        processedCount++
+      if (imageItems.length > 0) {
+        e.preventDefault()
+        const files: File[] = []
+        let processedCount = 0

-        // When all items are processed, handle the valid files
-        if (processedCount === imageItems.length && files.length > 0) {
-          const syntheticEvent = {
-            target: {
-              files: files,
+        imageItems.forEach((item) => {
+          const file = item.getAsFile()
+          if (file) {
+            files.push(file)
+          }
+          processedCount++
+          // When all items are processed, handle the valid files
+          if (processedCount === imageItems.length) {
+            if (files.length > 0) {
+              const syntheticEvent = {
+                target: {
+                  files: files,
+                },
+              } as unknown as React.ChangeEvent<HTMLInputElement>
+              handleFileChange(syntheticEvent)
+              hasProcessedImage = true
+            }
+          }
+        })
+
+        // If we found image items but couldn't get files, fall through to modern API
+        if (processedCount === imageItems.length && !hasProcessedImage) {
+          // Continue to modern clipboard API fallback below
+        } else {
+          return // Successfully processed with traditional method
+        }
+      }
+    }
+
+    // Modern Clipboard API fallback (for Linux, images copied from web, etc.)
+    if (navigator.clipboard && 'read' in navigator.clipboard) {
+      e.preventDefault()
+      try {
+        const clipboardContents = await navigator.clipboard.read()
+        const files: File[] = []
+
+        for (const item of clipboardContents) {
+          const imageTypes = item.types.filter((type) =>
+            type.startsWith('image/')
+          )
+          for (const type of imageTypes) {
+            try {
+              const blob = await item.getType(type)
+              // Convert blob to File with better naming
+              const extension = type.split('/')[1] || 'png'
+              const file = new File(
+                [blob],
+                `pasted-image-${Date.now()}.${extension}`,
+                { type }
+              )
+              files.push(file)
+            } catch (error) {
+              console.error('Error reading clipboard item:', error)
+            }
+          }
+        }
+
+        if (files.length > 0) {
+          const syntheticEvent = {
+            target: {
+              files: files,
@@ -403,8 +464,16 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
           } as unknown as React.ChangeEvent<HTMLInputElement>
           handleFileChange(syntheticEvent)
+          return
         }
-      })
+      } catch (error) {
+        console.error('Clipboard API access failed:', error)
+      }
+    }
+
+    // If we reach here, no image was found or processed
+    if (!hasProcessedImage) {
+      console.log('No image data found in clipboard or clipboard access failed')
     }
   }
@@ -535,29 +604,41 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
           )}

           {/* File attachment - show only for models with mmproj */}
           {hasMmproj && (
-            <div
-              className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1"
-              onClick={handleAttachmentClick}
-            >
-              <IconPhoto size={18} className="text-main-view-fg/50" />
-              <input
-                type="file"
-                ref={fileInputRef}
-                className="hidden"
-                multiple
-                onChange={handleFileChange}
-              />
-            </div>
+            <TooltipProvider>
+              <Tooltip>
+                <TooltipTrigger asChild>
+                  <div
+                    className="h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1"
+                    onClick={handleAttachmentClick}
+                  >
+                    <IconPhoto
+                      size={18}
+                      className="text-main-view-fg/50"
+                    />
+                    <input
+                      type="file"
+                      ref={fileInputRef}
+                      className="hidden"
+                      multiple
+                      onChange={handleFileChange}
+                    />
+                  </div>
+                </TooltipTrigger>
+                <TooltipContent>
+                  <p>{t('vision')}</p>
+                </TooltipContent>
+              </Tooltip>
+            </TooltipProvider>
           )}

           {/* Microphone - always available - Temp Hide */}
-          {/* <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
+          {/* <div className="h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
             <IconMicrophone size={18} className="text-main-view-fg/50" />
           </div> */}

           {selectedModel?.capabilities?.includes('embeddings') && (
             <TooltipProvider>
               <Tooltip>
                 <TooltipTrigger asChild>
-                  <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
+                  <div className="h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
                     <IconCodeCircle2
                       size={18}
                       className="text-main-view-fg/50"
@@ -601,7 +682,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
     return (
       <div
         className={cn(
-          'h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1 cursor-pointer relative',
+          'h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1 cursor-pointer relative',
           isOpen && 'bg-main-view-fg/10'
         )}
       >
@@ -632,7 +713,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
           <TooltipProvider>
             <Tooltip>
               <TooltipTrigger asChild>
-                <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
+                <div className="h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
                   <IconWorld
                     size={18}
                     className="text-main-view-fg/50"
@@ -649,7 +730,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
           <TooltipProvider>
             <Tooltip>
               <TooltipTrigger asChild>
-                <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
+                <div className="h-7 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
                   <IconAtom
                     size={18}
                     className="text-main-view-fg/50"

View File

@@ -178,7 +178,7 @@ export function DownloadManagement() {
         description: t('common:toast.modelValidationStarted.description', {
           modelId: event.modelId,
         }),
-        duration: 10000,
+        duration: Infinity,
       })
     },
     [t]
@@ -199,7 +199,7 @@ export function DownloadManagement() {
         description: t('common:toast.modelValidationFailed.description', {
           modelId: event.modelId,
         }),
-        duration: 30000, // Requires manual dismissal for security-critical message
+        duration: 30000,
       })
     },
     [removeDownload, removeLocalDownloadingModel, t]
@@ -244,9 +244,12 @@ export function DownloadManagement() {
       removeLocalDownloadingModel(state.modelId)
       toast.success(t('common:toast.downloadAndVerificationComplete.title'), {
         id: 'download-complete',
-        description: t('common:toast.downloadAndVerificationComplete.description', {
-          item: state.modelId,
-        }),
+        description: t(
+          'common:toast.downloadAndVerificationComplete.description',
+          {
+            item: state.modelId,
+          }
+        ),
       })
     },
     [removeDownload, removeLocalDownloadingModel, t]
@@ -260,7 +263,10 @@ export function DownloadManagement() {
     events.on(DownloadEvent.onFileDownloadStopped, onFileDownloadStopped)
     events.on(DownloadEvent.onModelValidationStarted, onModelValidationStarted)
     events.on(DownloadEvent.onModelValidationFailed, onModelValidationFailed)
-    events.on(DownloadEvent.onFileDownloadAndVerificationSuccess, onFileDownloadAndVerificationSuccess)
+    events.on(
+      DownloadEvent.onFileDownloadAndVerificationSuccess,
+      onFileDownloadAndVerificationSuccess
+    )

     // Register app update event listeners
     events.on(AppEvent.onAppUpdateDownloadUpdate, onAppUpdateDownloadUpdate)
@@ -278,7 +284,10 @@ export function DownloadManagement() {
       onModelValidationStarted
     )
     events.off(DownloadEvent.onModelValidationFailed, onModelValidationFailed)
-    events.off(DownloadEvent.onFileDownloadAndVerificationSuccess, onFileDownloadAndVerificationSuccess)
+    events.off(
+      DownloadEvent.onFileDownloadAndVerificationSuccess,
+      onFileDownloadAndVerificationSuccess
+    )

     // Unregister app update event listeners
     events.off(AppEvent.onAppUpdateDownloadUpdate, onAppUpdateDownloadUpdate)

View File

@@ -414,13 +414,15 @@ const DropdownModelProvider = ({
             </span>
           </button>
         </PopoverTrigger>
-        {currentModel?.settings && provider && (
-          <ModelSetting
-            model={currentModel as Model}
-            provider={provider}
-            smallIcon
-          />
-        )}
+        {currentModel?.settings &&
+          provider &&
+          provider.provider === 'llamacpp' && (
+            <ModelSetting
+              model={currentModel as Model}
+              provider={provider}
+              smallIcon
+            />
+          )}
         <ModelSupportStatus
           modelId={selectedModel?.id}
           provider={selectedProvider}

View File

@@ -5,11 +5,11 @@ import {
 } from '@/components/ui/hover-card'
 import { IconInfoCircle } from '@tabler/icons-react'
 import { CatalogModel, ModelQuant } from '@/services/models'
-import { extractDescription } from '@/lib/models'

 interface ModelInfoHoverCardProps {
   model: CatalogModel
   variant?: ModelQuant
+  isDefaultVariant?: boolean
   defaultModelQuantizations: string[]
   modelSupportStatus: Record<string, string>
   onCheckModelSupport: (variant: ModelQuant) => void
@@ -19,12 +19,12 @@ interface ModelInfoHoverCardProps {
 export const ModelInfoHoverCard = ({
   model,
   variant,
+  isDefaultVariant,
   defaultModelQuantizations,
   modelSupportStatus,
   onCheckModelSupport,
   children,
 }: ModelInfoHoverCardProps) => {
-  const isVariantMode = !!variant
   const displayVariant =
     variant ||
     model.quants.find((m) =>
@@ -79,6 +79,15 @@ export const ModelInfoHoverCard = ({
           </span>
         </div>
       )
+    } else if (status === 'GREY') {
+      return (
+        <div className="flex items-start gap-2">
+          <div className="size-2 shrink-0 bg-neutral-500 rounded-full mt-1"></div>
+          <span className="text-neutral-500 font-medium">
+            Unable to determine model compatibility with your current device
+          </span>
+        </div>
+      )
     } else {
       return (
         <div className="flex items-start gap-2">
@@ -95,8 +104,8 @@ export const ModelInfoHoverCard = ({
         {children || (
           <div className="cursor-pointer">
             <IconInfoCircle
-              size={14}
-              className="mt-0.5 text-main-view-fg/50 hover:text-main-view-fg/80 transition-colors"
+              size={isDefaultVariant ? 20 : 14}
+              className="mt-0.5 text-main-view-fg/80 hover:text-main-view-fg/80 transition-colors"
             />
           </div>
         )}
@@ -106,10 +115,10 @@ export const ModelInfoHoverCard = ({
           {/* Header */}
           <div className="border-b border-main-view-fg/10 pb-3">
             <h4 className="text-sm font-semibold text-main-view-fg">
-              {isVariantMode ? variant.model_id : model.model_name}
+              {!isDefaultVariant ? variant?.model_id : model?.model_name}
             </h4>
             <p className="text-xs text-main-view-fg/60 mt-1">
-              {isVariantMode
+              {!isDefaultVariant
                 ? 'Model Variant Information'
                 : 'Model Information'}
             </p>
@@ -118,57 +127,19 @@ export const ModelInfoHoverCard = ({
           {/* Main Info Grid */}
           <div className="grid grid-cols-2 gap-3 text-xs">
             <div className="space-y-2">
-              {isVariantMode ? (
-                <>
-                  <div>
-                    <span className="text-main-view-fg/50 block">
-                      File Size
-                    </span>
-                    <span className="text-main-view-fg font-medium mt-1 inline-block">
-                      {variant.file_size}
-                    </span>
-                  </div>
-                  <div>
-                    <span className="text-main-view-fg/50 block">
-                      Quantization
-                    </span>
-                    <span className="text-main-view-fg font-medium mt-1 inline-block">
-                      {variant.model_id.split('-').pop()?.toUpperCase() ||
-                        'N/A'}
-                    </span>
-                  </div>
-                </>
-              ) : (
-                <>
-                  <div>
-                    <span className="text-main-view-fg/50 block">
-                      Downloads
-                    </span>
-                    <span className="text-main-view-fg font-medium mt-1 inline-block">
-                      {model.downloads?.toLocaleString() || '0'}
-                    </span>
-                  </div>
-                  <div>
-                    <span className="text-main-view-fg/50 block">Variants</span>
-                    <span className="text-main-view-fg font-medium mt-1 inline-block">
-                      {model.quants?.length || 0}
-                    </span>
-                  </div>
-                </>
-              )}
+              <>
+                <div>
+                  <span className="text-main-view-fg/50 block">
+                    {isDefaultVariant ? 'Default Quantization' : 'Quantization'}
+                  </span>
+                  <span className="text-main-view-fg font-medium mt-1 inline-block">
+                    {variant?.model_id.split('-').pop()?.toUpperCase() || 'N/A'}
+                  </span>
+                </div>
+              </>
             </div>

             <div className="space-y-2">
-              {!isVariantMode && (
-                <div>
-                  <span className="text-main-view-fg/50 block">
-                    Default Size
-                  </span>
-                  <span className="text-main-view-fg font-medium mt-1 inline-block">
-                    {displayVariant?.file_size || 'N/A'}
-                  </span>
-                </div>
-              )}
               <div>
                 <span className="text-main-view-fg/50 block">
                   Compatibility
@@ -204,21 +175,6 @@ export const ModelInfoHoverCard = ({
             </div>
           </div>
         )}
-
-        {/* Content Section */}
-        <div className="border-t border-main-view-fg/10 pt-3">
-          <h5 className="text-xs font-medium text-main-view-fg/70 mb-1">
-            {isVariantMode ? 'Download URL' : 'Description'}
-          </h5>
-          <div className="text-xs text-main-view-fg/60 bg-main-view-fg/5 rounded p-2">
-            {isVariantMode ? (
-              <div className="font-mono break-all">{variant.path}</div>
-            ) : (
-              extractDescription(model?.description) ||
-              'No description available'
-            )}
-          </div>
-        </div>
       </div>
     </HoverCardContent>
   </HoverCard>

View File

@@ -7,7 +7,8 @@ import {
   TooltipTrigger,
 } from '@/components/ui/tooltip'
 import { isModelSupported } from '@/services/models'
-import { getJanDataFolderPath, joinPath } from '@janhq/core'
+import { getJanDataFolderPath, joinPath, fs } from '@janhq/core'
+import { invoke } from '@tauri-apps/api/core'
 
 interface ModelSupportStatusProps {
   modelId: string | undefined
@@ -23,7 +24,7 @@ export const ModelSupportStatus = ({
   className,
 }: ModelSupportStatusProps) => {
   const [modelSupportStatus, setModelSupportStatus] = useState<
-    'RED' | 'YELLOW' | 'GREEN' | 'LOADING' | null
+    'RED' | 'YELLOW' | 'GREEN' | 'LOADING' | null | 'GREY'
   >(null)
 
   // Helper function to check model support with proper path resolution
@@ -31,12 +32,12 @@ export const ModelSupportStatus = ({
     async (
       id: string,
       ctxSize: number
-    ): Promise<'RED' | 'YELLOW' | 'GREEN'> => {
+    ): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY' | null> => {
       try {
-        // Get Jan's data folder path and construct the full model file path
-        // Following the llamacpp extension structure: <Jan's data folder>/llamacpp/models/<modelId>/model.gguf
         const janDataFolder = await getJanDataFolderPath()
-        const modelFilePath = await joinPath([
+
+        // First try the standard downloaded model path
+        const ggufModelPath = await joinPath([
           janDataFolder,
           'llamacpp',
           'models',
@@ -44,14 +45,47 @@ export const ModelSupportStatus = ({
           'model.gguf',
         ])
-        return await isModelSupported(modelFilePath, ctxSize)
+
+        // Check if the standard model.gguf file exists
+        if (await fs.existsSync(ggufModelPath)) {
+          return await isModelSupported(ggufModelPath, ctxSize)
+        }
+
+        // If model.gguf doesn't exist, try reading from model.yml (for imported models)
+        const modelConfigPath = await joinPath([
+          janDataFolder,
+          'llamacpp',
+          'models',
+          id,
+          'model.yml',
+        ])
+
+        if (!(await fs.existsSync(modelConfigPath))) {
+          console.error(
+            `Neither model.gguf nor model.yml found for model: ${id}`
+          )
+          return null
+        }
+
+        // Read the model configuration to get the actual model path
+        const modelConfig = await invoke<{ model_path: string }>('read_yaml', {
+          path: `llamacpp/models/${id}/model.yml`,
+        })
+
+        // Handle both absolute and relative paths
+        const actualModelPath =
+          modelConfig.model_path.startsWith('/') ||
+          modelConfig.model_path.match(/^[A-Za-z]:/)
+            ? modelConfig.model_path // absolute path, use as-is
+            : await joinPath([janDataFolder, modelConfig.model_path]) // relative path, join with data folder
+
+        return await isModelSupported(actualModelPath, ctxSize)
       } catch (error) {
         console.error(
-          'Error checking model support with constructed path:',
+          'Error checking model support with path resolution:',
          error
        )
        // If path construction or model support check fails, assume not supported
-        return 'RED'
+        return null
      }
    },
    []
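
The absolute-versus-relative branch above is the subtle part of this hunk: the `model_path` read from model.yml may be a POSIX absolute path, a Windows drive-letter path, or a path relative to Jan's data folder. A minimal standalone sketch of the same decision, using a plain string join instead of `joinPath` for brevity (the `isAbsolutePath` and `resolveModelPath` helpers are hypothetical, introduced here for illustration):

// Hypothetical helper mirroring the check in the hunk above: treat a path as
// absolute if it starts with '/' (POSIX) or a drive letter like 'C:'.
const isAbsolutePath = (p: string): boolean =>
  p.startsWith('/') || /^[A-Za-z]:/.test(p)

// Resolve a model_path from model.yml against the Jan data folder.
const resolveModelPath = (janDataFolder: string, modelPath: string): string =>
  isAbsolutePath(modelPath)
    ? modelPath // absolute: use as-is
    : `${janDataFolder}/${modelPath}` // relative: join with the data folder

// resolveModelPath('/home/user/jan', 'llamacpp/models/m/model.gguf')
//   -> '/home/user/jan/llamacpp/models/m/model.gguf'
// resolveModelPath('/home/user/jan', 'C:\\models\\m.gguf')
//   -> 'C:\\models\\m.gguf'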


@@ -1,4 +1,3 @@
-/* eslint-disable @typescript-eslint/no-explicit-any */
 /* eslint-disable react-hooks/exhaustive-deps */
 import ReactMarkdown, { Components } from 'react-markdown'
 import remarkGfm from 'remark-gfm'
@@ -8,7 +7,6 @@ import rehypeKatex from 'rehype-katex'
 import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
 import * as prismStyles from 'react-syntax-highlighter/dist/cjs/styles/prism'
 import { memo, useState, useMemo } from 'react'
-import virtualizedRenderer from 'react-syntax-highlighter-virtualized-renderer'
 import { getReadableLanguageName } from '@/lib/utils'
 import { cn } from '@/lib/utils'
 import { useCodeblock } from '@/hooks/useCodeblock'
@@ -75,8 +73,6 @@ function RenderMarkdownComponent({
   // Generate a stable ID based on code content and language
   const codeId = `code-${hashString(code.substring(0, 40) + language)}`
-  const shouldVirtualize = code.split('\n').length > 300
-
   return !isInline && !isUser ? (
     <div className="relative overflow-hidden border rounded-md border-main-view-fg/2">
       <style>
@@ -147,11 +143,6 @@
             overflow: 'auto',
             border: 'none',
           }}
-          renderer={
-            shouldVirtualize
-              ? (virtualizedRenderer() as (props: any) => React.ReactNode)
-              : undefined
-          }
           PreTag="div"
           CodeTag={'code'}
           {...props}


@@ -40,6 +40,7 @@ import TokenSpeedIndicator from '@/containers/TokenSpeedIndicator'
 import CodeEditor from '@uiw/react-textarea-code-editor'
 import '@uiw/react-textarea-code-editor/dist.css'
 import { useTranslation } from '@/i18n/react-i18next-compat'
+import { useModelProvider } from '@/hooks/useModelProvider'
 
 const CopyButton = ({ text }: { text: string }) => {
   const [copied, setCopied] = useState(false)
@@ -152,6 +153,7 @@ export const ThreadContent = memo(
     }
   ) => {
     const { t } = useTranslation()
+    const { selectedModel } = useModelProvider()
 
     // Use useMemo to stabilize the components prop
     const linkComponents = useMemo(
@@ -517,7 +519,7 @@
           </DialogContent>
         </Dialog>
-        {item.isLastMessage && (
+        {item.isLastMessage && selectedModel && (
           <Tooltip>
             <TooltipTrigger asChild>
               <button


@@ -7,11 +7,7 @@ import {
   DialogTrigger,
 } from '@/components/ui/dialog'
 import { Switch } from '@/components/ui/switch'
-import {
-  Tooltip,
-  TooltipContent,
-  TooltipTrigger,
-} from '@/components/ui/tooltip'
 import { useModelProvider } from '@/hooks/useModelProvider'
 import {
   IconPencil,
@@ -19,7 +15,7 @@ import {
   IconTool,
   // IconWorld,
   // IconAtom,
-  IconCodeCircle2,
+  // IconCodeCircle2,
 } from '@tabler/icons-react'
 import { useState, useEffect } from 'react'
 import { useTranslation } from '@/i18n/react-i18next-compat'
@@ -177,24 +173,16 @@
                 {t('providers:editModel.vision')}
               </span>
             </div>
-            <Tooltip>
-              <TooltipTrigger>
-                <Switch
-                  id="vision-capability"
-                  checked={capabilities.vision}
-                  disabled={true}
-                  onCheckedChange={(checked) =>
-                    handleCapabilityChange('vision', checked)
-                  }
-                />
-              </TooltipTrigger>
-              <TooltipContent>
-                {t('providers:editModel.notAvailable')}
-              </TooltipContent>
-            </Tooltip>
+            <Switch
+              id="vision-capability"
+              checked={capabilities.vision}
+              onCheckedChange={(checked) =>
+                handleCapabilityChange('vision', checked)
+              }
+            />
           </div>
-          <div className="flex items-center justify-between">
+          {/* <div className="flex items-center justify-between">
             <div className="flex items-center space-x-2">
               <IconCodeCircle2 className="size-4 text-main-view-fg/70" />
               <span className="text-sm">
@@ -216,7 +204,7 @@
                 {t('providers:editModel.notAvailable')}
               </TooltipContent>
             </Tooltip>
-          </div>
+          </div> */}
           {/* <div className="flex items-center justify-between">
             <div className="flex items-center space-x-2">


@@ -27,6 +27,11 @@ type MCPServerStoreState = {
   setLeftPanel: (value: boolean) => void
   addServer: (key: string, config: MCPServerConfig) => void
   editServer: (key: string, config: MCPServerConfig) => void
+  renameServer: (
+    oldKey: string,
+    newKey: string,
+    config: MCPServerConfig
+  ) => void
   deleteServer: (key: string) => void
   setServers: (servers: MCPServers) => void
   syncServers: () => Promise<void>
@@ -47,7 +52,10 @@ export const useMCPServers = create<MCPServerStoreState>()((set, get) => ({
   // Add a new MCP server or update if the key already exists
   addServer: (key, config) =>
     set((state) => {
-      const mcpServers = { ...state.mcpServers, [key]: config }
+      // Remove the key first if it exists to maintain insertion order
+      // eslint-disable-next-line @typescript-eslint/no-unused-vars
+      const { [key]: _, ...restServers } = state.mcpServers
+      const mcpServers = { [key]: config, ...restServers }
       return { mcpServers }
     }),
@@ -60,6 +68,27 @@ export const useMCPServers = create<MCPServerStoreState>()((set, get) => ({
       const mcpServers = { ...state.mcpServers, [key]: config }
       return { mcpServers }
     }),
+  // Rename a server while preserving its position
+  renameServer: (oldKey, newKey, config) =>
+    set((state) => {
+      // Only proceed if the server exists
+      if (!state.mcpServers[oldKey]) return state
+
+      const entries = Object.entries(state.mcpServers)
+      const mcpServers: MCPServers = {}
+
+      // Rebuild the object with the same order, replacing the old key with the new key
+      entries.forEach(([key, serverConfig]) => {
+        if (key === oldKey) {
+          mcpServers[newKey] = config
+        } else {
+          mcpServers[key] = serverConfig
+        }
+      })
+
+      return { mcpServers }
+    }),
   setServers: (servers) =>
     set((state) => {
       const mcpServers = { ...state.mcpServers, ...servers }
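
Both addServer and renameServer lean on the fact that plain JavaScript objects preserve string-key insertion order: addServer destructures the key out and spreads it back at the front, while renameServer rebuilds the record entry-by-entry so the renamed server keeps its slot. A self-contained sketch of the rename idea, with a simplified stand-in for MCPServerConfig:

// Simplified stand-in for MCPServerConfig, for illustration only.
type Config = { command: string }

// Rebuild the record in its original order, swapping oldKey for newKey.
function renameKeyInPlace(
  servers: Record<string, Config>,
  oldKey: string,
  newKey: string,
  config: Config
): Record<string, Config> {
  const result: Record<string, Config> = {}
  for (const [key, value] of Object.entries(servers)) {
    if (key === oldKey) {
      result[newKey] = config // same position, new name
    } else {
      result[key] = value
    }
  }
  return result
}

const before = { a: { command: 'x' }, b: { command: 'y' }, c: { command: 'z' } }
const after = renameKeyInPlace(before, 'b', 'b2', { command: 'y' })
console.log(Object.keys(after)) // -> ['a', 'b2', 'c']: 'b2' keeps b's slot,
// whereas delete-then-add would push it to the end of the list.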


@@ -241,7 +241,7 @@ export const useModelProvider = create<ModelProviderState>()(
       }
 
       // Migrate model settings
-      if (provider.models) {
+      if (provider.models && provider.provider === 'llamacpp') {
         provider.models.forEach((model) => {
           if (!model.settings) model.settings = {}


@@ -37,7 +37,7 @@
   "reportAnIssueDesc": "Found a bug? Help us out by filing an issue on GitHub.",
   "reportIssue": "Report Issue",
   "credits": "Credits",
-  "creditsDesc1": "Jan is built with ❤️ by the Menlo Team.",
+  "creditsDesc1": "👋 Jan is built with ❤️ by the Menlo Research team.",
   "creditsDesc2": "Special thanks to our open-source dependencies—especially llama.cpp and Tauri—and to our amazing AI community.",
   "appVersion": "App Version",
   "dataFolder": {
@@ -234,7 +234,7 @@
   "reportAnIssueDesc": "Found a bug? Help us out by filing an issue on GitHub.",
   "reportIssue": "Report Issue",
   "credits": "Credits",
-  "creditsDesc1": "Jan is built with ❤️ by the Menlo Team.",
+  "creditsDesc1": "👋 Jan is built with ❤️ by the Menlo Research team.",
   "creditsDesc2": "Special thanks to our open-source dependencies—especially llama.cpp and Tauri—and to our amazing AI community."
 },
 "extensions": {


@@ -64,7 +64,7 @@ function HubModelDetail() {
   // State for model support status
   const [modelSupportStatus, setModelSupportStatus] = useState<
-    Record<string, 'RED' | 'YELLOW' | 'GREEN' | 'LOADING'>
+    Record<string, 'RED' | 'YELLOW' | 'GREEN' | 'LOADING' | 'GREY'>
   >({})
 
   useEffect(() => {


@@ -353,12 +353,7 @@ function Hub() {
     // Immediately set local downloading state
     addLocalDownloadingModel(modelId)
     const mmprojPath = model.mmproj_models?.[0]?.path
-    pullModelWithMetadata(
-      modelId,
-      modelUrl,
-      mmprojPath,
-      huggingfaceToken
-    )
+    pullModelWithMetadata(modelId, modelUrl, mmprojPath, huggingfaceToken)
   }
 
   return (
@@ -399,13 +394,13 @@ function Hub() {
       )
     }
   }, [
-    localDownloadingModels,
     downloadProcesses,
     llamaProvider?.models,
     isRecommendedModel,
-    downloadButtonRef,
+    localDownloadingModels,
+    addLocalDownloadingModel,
     t,
-    addLocalDownloadingModel,
-    huggingfaceToken,
     handleUseModel,
   ])
@@ -482,9 +477,9 @@ function Hub() {
   const isLastStep = currentStepIndex === steps.length - 1
 
   const renderFilter = () => {
-    if (searchValue.length === 0)
-      return (
-        <>
+    return (
+      <>
+        {searchValue.length === 0 && (
          <DropdownMenu>
            <DropdownMenuTrigger>
              <span className="flex cursor-pointer items-center gap-1 px-2 py-1 rounded-sm bg-main-view-fg/15 text-sm outline-none text-main-view-fg font-medium">
@@ -509,17 +504,18 @@ function Hub() {
            ))}
          </DropdownMenuContent>
        </DropdownMenu>
-        <div className="flex items-center gap-2">
-          <Switch
-            checked={showOnlyDownloaded}
-            onCheckedChange={setShowOnlyDownloaded}
-          />
-          <span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap">
-            {t('hub:downloaded')}
-          </span>
-        </div>
-      </>
-    )
+        )}
+        <div className="flex items-center gap-2">
+          <Switch
+            checked={showOnlyDownloaded}
+            onCheckedChange={setShowOnlyDownloaded}
+          />
+          <span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap">
+            {t('hub:downloaded')}
+          </span>
+        </div>
+      </>
+    )
   }
 
   return (
@@ -661,6 +657,18 @@ function Hub() {
                         defaultModelQuantizations={
                           defaultModelQuantizations
                         }
+                        variant={
+                          filteredModels[
+                            virtualItem.index
+                          ].quants.find((m) =>
+                            defaultModelQuantizations.some((e) =>
+                              m.model_id.toLowerCase().includes(e)
+                            )
+                          ) ??
+                          filteredModels[virtualItem.index]
+                            .quants?.[0]
+                        }
+                        isDefaultVariant={true}
                         modelSupportStatus={modelSupportStatus}
                         onCheckModelSupport={checkModelSupport}
                       />
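
The new variant prop selects a default quant: the first one whose model_id contains a preferred quantization tag, falling back to the first quant in the list. A sketch of that selection with hypothetical sample data standing in for filteredModels and defaultModelQuantizations:

// Hypothetical shape, trimmed to the field the selection touches.
interface Quant {
  model_id: string
}

const quants: Quant[] = [
  { model_id: 'llama-3.2-1B-Q8_0' },
  { model_id: 'llama-3.2-1B-Q4_K_M' },
]
const defaultModelQuantizations = ['q4_k_m', 'q4_0']

// Prefer a quant matching any default quantization tag; otherwise fall back
// to the first quant (the ?? branch in the hunk above).
const defaultVariant =
  quants.find((m) =>
    defaultModelQuantizations.some((e) => m.model_id.toLowerCase().includes(e))
  ) ?? quants[0]

console.log(defaultVariant.model_id) // -> 'llama-3.2-1B-Q4_K_M'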


@@ -93,6 +93,7 @@ function MCPServers() {
     mcpServers,
     addServer,
     editServer,
+    renameServer,
     deleteServer,
     syncServers,
     syncServersAndRestart,
@@ -137,22 +138,27 @@
   }
 
   const handleSaveServer = async (name: string, config: MCPServerConfig) => {
-    toggleServer(name, false)
     if (editingKey) {
-      // If server name changed, delete old one and add new one
+      // If server name changed, rename it while preserving position
       if (editingKey !== name) {
-        deleteServer(editingKey)
-        addServer(name, config)
+        toggleServer(editingKey, false)
+        renameServer(editingKey, name, config)
+        toggleServer(name, true)
+        // Restart servers to update tool references with new server name
+        syncServersAndRestart()
       } else {
+        toggleServer(editingKey, false)
         editServer(editingKey, config)
+        toggleServer(editingKey, true)
+        syncServers()
       }
     } else {
       // Add new server
+      toggleServer(name, false)
       addServer(name, config)
+      toggleServer(name, true)
+      syncServers()
     }
-    syncServers()
-    toggleServer(name, true)
   }
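
The ordering in handleSaveServer matters: each branch toggles the server off before mutating the store and back on afterwards, and only the rename path calls syncServersAndRestart, since running tools still reference the old server name. A condensed sketch of that control flow, with stubbed store actions (the stubs are placeholders for the real store shown earlier, not its implementation):

// Placeholder stubs so the sketch is self-contained.
type Config = { command: string }
const toggleServer = (key: string, on: boolean) => console.log('toggle', key, on)
const renameServer = (o: string, n: string, c: Config) => console.log('rename', o, n, c.command)
const editServer = (k: string, c: Config) => console.log('edit', k, c.command)
const addServer = (k: string, c: Config) => console.log('add', k, c.command)
const syncServers = async () => console.log('sync')
const syncServersAndRestart = async () => console.log('sync + restart')

async function saveServer(editingKey: string | null, name: string, config: Config) {
  if (editingKey && editingKey !== name) {
    // Rename: stop under the old name, move the entry, start under the new
    // name, then restart so tool references pick up the new server name.
    toggleServer(editingKey, false)
    renameServer(editingKey, name, config)
    toggleServer(name, true)
    await syncServersAndRestart()
  } else if (editingKey) {
    // In-place edit: bounce the server so the new config takes effect.
    toggleServer(editingKey, false)
    editServer(editingKey, config)
    toggleServer(editingKey, true)
    await syncServers()
  } else {
    // New server: add, enable, and sync.
    toggleServer(name, false)
    addServer(name, config)
    toggleServer(name, true)
    await syncServers()
  }
}

// saveServer('old-name', 'new-name', { command: 'npx' })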
const handleEdit = (serverKey: string) => { const handleEdit = (serverKey: string) => {


@@ -584,10 +584,12 @@ function ProviderDetail() {
       }
       actions={
         <div className="flex items-center gap-0.5">
-          <DialogEditModel
-            provider={provider}
-            modelId={model.id}
-          />
+          {provider && provider.provider !== 'llamacpp' && (
+            <DialogEditModel
+              provider={provider}
+              modelId={model.id}
+            />
+          )}
           {model.settings && (
             <ModelSetting
               provider={provider}


@@ -924,7 +924,7 @@ describe('models service', () => {
       expect(result).toBe('YELLOW') // Should use fallback
     })
 
-    it('should return RED when there is an error', async () => {
+    it('should return GREY when there is an error', async () => {
       const mockEngineWithError = {
         ...mockEngine,
         isModelSupported: vi.fn().mockRejectedValue(new Error('Test error')),
@@ -934,7 +934,7 @@
 
       const result = await isModelSupported('/path/to/model.gguf')
 
-      expect(result).toBe('RED')
+      expect(result).toBe('GREY')
     })
   })
 })


@@ -491,7 +491,7 @@ export const checkMmprojExistsAndUpdateOffloadMMprojSetting = async (
         key: 'offload_mmproj',
         title: 'Offload MMProj',
         description:
-          'Offload multimodal projection layers to GPU',
+          'Offload multimodal projection model to GPU',
         controller_type: 'checkbox',
         controller_props: {
           value: true,
@@ -592,7 +592,7 @@ export const checkMmprojExists = async (modelId: string): Promise<boolean> => {
 export const isModelSupported = async (
   modelPath: string,
   ctxSize?: number
-): Promise<'RED' | 'YELLOW' | 'GREEN'> => {
+): Promise<'RED' | 'YELLOW' | 'GREEN' | 'GREY'> => {
   try {
     const engine = getEngine('llamacpp') as AIEngine & {
       isModelSupported?: (
@@ -608,6 +608,6 @@ export const isModelSupported = async (
     return 'YELLOW' // Conservative fallback
   } catch (error) {
     console.error(`Error checking model support for ${modelPath}:`, error)
-    return 'RED' // Error state, assume not supported
+    return 'GREY' // Error state, support unknown
   }
 }
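
With GREY in the union, callers can distinguish "checked and unsupported" (RED) from "check failed, support unknown" (GREY). A sketch of how a consumer might map the statuses to copy; the label strings here are hypothetical, not taken from the app:

type SupportStatus = 'RED' | 'YELLOW' | 'GREEN' | 'GREY'

// Hypothetical UI copy per status; GREY signals an indeterminate check
// (e.g. the engine threw), not a negative verdict like RED.
const statusHint: Record<SupportStatus, string> = {
  GREEN: 'Should run well on this device',
  YELLOW: 'May run with reduced performance',
  RED: 'Likely not supported on this device',
  GREY: 'Support could not be determined',
}

console.log(statusHint.GREY) // -> 'Support could not be determined'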