Merge pull request #1032 from janhq/copyFixes

style: in app copy fixes
This commit is contained in:
0xSage 2023-12-15 16:34:40 +08:00 committed by GitHub
commit 974cbff76d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 10 additions and 10 deletions

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/assistant-extension", "name": "@janhq/assistant-extension",
"version": "1.0.0", "version": "1.0.0",
"description": "Assistant extension", "description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/conversational-extension", "name": "@janhq/conversational-extension",
"version": "1.0.0", "version": "1.0.0",
"description": "Conversational Extension - Stores jan app threads and messages in JSON files", "description": "This extension enables conversations and state persistence via your filesystem",
"main": "dist/index.js", "main": "dist/index.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",
"license": "MIT", "license": "MIT",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/inference-nitro-extension", "name": "@janhq/inference-nitro-extension",
"version": "1.0.0", "version": "1.0.0",
"description": "Inference Engine for Nitro Extension, powered by @janhq/nitro, bring a high-performance Llama model inference in pure C++.", "description": "This extension embeds Nitro, a lightweight (3mb) inference engine written in C++. See nitro.jan.ai",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/inference-openai-extension", "name": "@janhq/inference-openai-extension",
"version": "1.0.0", "version": "1.0.0",
"description": "Inference Engine for OpenAI Extension that can be used with any OpenAI compatible API", "description": "This extension enables OpenAI chat completion API calls",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/inference-triton-trt-llm-extension", "name": "@janhq/inference-triton-trt-llm-extension",
"version": "1.0.0", "version": "1.0.0",
"description": "Inference Engine for NVIDIA Triton with TensorRT-LLM Extension integration on Jan extension framework", "description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/model-extension", "name": "@janhq/model-extension",
"version": "1.0.13", "version": "1.0.13",
"description": "Model Management Extension provides model exploration and seamless downloads", "description": "This extension provides model downloads and controls the model lifecycle",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -1,7 +1,7 @@
{ {
"name": "@janhq/monitoring-extension", "name": "@janhq/monitoring-extension",
"version": "1.0.9", "version": "1.0.9",
"description": "Utilizing systeminformation, it provides essential System and OS information retrieval", "description": "This extension provides system health and OS level data",
"main": "dist/index.js", "main": "dist/index.js",
"module": "dist/module.js", "module": "dist/module.js",
"author": "Jan <service@jan.ai>", "author": "Jan <service@jan.ai>",

View File

@@ -34,8 +34,8 @@ const ExploreModelsScreen = () => {
const [searchValue, setsearchValue] = useState('') const [searchValue, setsearchValue] = useState('')
const [tabActive, setTabActive] = useState('Model') const [tabActive, setTabActive] = useState('Model')
const { downloadedModels } = useGetDownloadedModels() const { downloadedModels } = useGetDownloadedModels()
const [sortSelected, setSortSelected] = useState('All Model') const [sortSelected, setSortSelected] = useState('All Models')
const sortMenu = ['All Model', 'Recommended', 'Downloaded'] const sortMenu = ['All Models', 'Recommended', 'Downloaded']
const filteredModels = models.filter((x) => { const filteredModels = models.filter((x) => {
if (sortSelected === 'Downloaded') { if (sortSelected === 'Downloaded') {

View File

@@ -20,7 +20,7 @@ const Advanced = () => {
</h6> </h6>
</div> </div>
<p className="whitespace-pre-wrap leading-relaxed"> <p className="whitespace-pre-wrap leading-relaxed">
Enable experimental features that may be unstable or not fully Enable experimental features that may be unstable
tested. tested.
</p> </p>
</div> </div>