Release/v0.4.9 (#2421)
* fix: turn off experimental settings should also turn off quick ask (#2411)
* fix: app glitches 1s generating response before starting model (#2412)
* fix: disable experimental feature should also disable vulkan (#2414)
* fix: model load stuck on windows when can't get CPU core count (#2413)
* feat: TensorRT-LLM engine update support (#2415)
  * fix: engine update
  * chore: add remove prepopulated models
  * update tinyjensen url
  * update llamacorn
  * update Mistral 7B Instruct v0.1 int4
  * update tensorrt
  * update
  * update
  * prettier
  * update mistral config
  * fix some lint
* Tensorrt LLM disable turing support (#2418)
* chore: add prompt template tensorrtllm (#2375)
  * Add Prompt template for mistral and correct model metadata
* fix: correct tensorrt mistral model.json (#2419)

---------

Signed-off-by: James <james@jan.ai>
Signed-off-by: hiro <hiro@jan.ai>
Co-authored-by: Louis <louis@jan.ai>
Co-authored-by: James <james@jan.ai>
Co-authored-by: hiro <hiro@jan.ai>
Co-authored-by: hiento09 <136591877+hiento09@users.noreply.github.com>
Co-authored-by: Hien To <tominhhien97@gmail.com>
parent c81a33f382
commit 3a3bceb0c0
@@ -22,6 +22,7 @@ on:
     branches:
       - main
       - dev
+      - release/**
     paths:
       - "electron/**"
       - .github/workflows/jan-electron-linter-and-test.yml
@@ -46,7 +46,7 @@
   },
   "devDependencies": {
     "@types/jest": "^29.5.12",
-    "@types/node": "^12.0.2",
+    "@types/node": "^20.11.4",
     "eslint": "8.57.0",
     "eslint-plugin-jest": "^27.9.0",
     "jest": "^29.7.0",
@@ -96,6 +96,7 @@ export enum FileManagerRoute {
   fileStat = 'fileStat',
   writeBlob = 'writeBlob',
   mkdir = 'mkdir',
+  rm = 'rm',
 }
 
 export type ApiFunction = (...args: any[]) => any
@@ -19,6 +19,7 @@ export interface Compatibility {
 const ALL_INSTALLATION_STATE = [
   'NotRequired', // not required.
   'Installed', // require and installed. Good to go.
+  'Updatable', // require and installed but need to be updated.
   'NotInstalled', // require to be installed.
   'Corrupted', // require but corrupted. Need to redownload.
 ] as const
@@ -59,6 +60,13 @@ export abstract class BaseExtension implements ExtensionType {
     return undefined
   }
 
+  /**
+   * Determine if the extension is updatable.
+   */
+  updatable(): boolean {
+    return false
+  }
+
   /**
    * Determine if the prerequisites for the extension are installed.
    *
@@ -45,6 +45,9 @@ const mkdir = (...args: any[]) => global.core.api?.mkdir(...args)
  */
 const rmdirSync = (...args: any[]) =>
   global.core.api?.rmdirSync(...args, { recursive: true, force: true })
+
+const rm = (path: string) => global.core.api?.rm(path)
+
 /**
  * Deletes a file from the local file system.
  * @param {string} path - The path of the file to delete.
@@ -96,6 +99,7 @@ export const fs = {
   mkdirSync,
   mkdir,
   rmdirSync,
+  rm,
   unlinkSync,
   appendFileSync,
   copyFileSync,
@@ -100,4 +100,16 @@ export class FSExt implements Processor {
       })
     })
   }
+
+  rmdir(path: string): Promise<void> {
+    return new Promise((resolve, reject) => {
+      fs.rm(path, { recursive: true }, (err) => {
+        if (err) {
+          reject(err)
+        } else {
+          resolve()
+        }
+      })
+    })
+  }
 }
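
The three hunks above add a recursive remove to the file-system plumbing: a new `rm` entry in FileManagerRoute, an `fs.rm(path)` wrapper on the renderer-facing `fs` facade, and an `FSExt.rmdir` handler that calls Node's `fs.rm` with `{ recursive: true }`. As a minimal usage sketch (the helper name is hypothetical, and it assumes the same `@janhq/core` helpers that removePopulatedModels uses later in this diff):

import { fs, joinPath, getJanDataFolderPath } from '@janhq/core'

// Hypothetical cleanup helper built on the new route: deletes a model folder
// (and its contents) from the Jan data directory, if it exists.
async function removeModelFolder(modelId: string): Promise<void> {
  const janDataFolderPath = await getJanDataFolderPath()
  const modelPath = await joinPath([janDataFolderPath, 'models', modelId])
  if (await fs.existsSync(modelPath)) {
    // Routed through FileManagerRoute.rm to FSExt.rmdir, which removes recursively.
    await fs.rm(modelPath)
  }
}
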
@@ -82,26 +82,34 @@ export const getJanExtensionsPath = (): string => {
  */
 export const physicalCpuCount = async (): Promise<number> => {
   const platform = os.platform()
-  if (platform === 'linux') {
-    const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
-    return parseInt(output.trim(), 10)
-  } else if (platform === 'darwin') {
-    const output = await exec('sysctl -n hw.physicalcpu_max')
-    return parseInt(output.trim(), 10)
-  } else if (platform === 'win32') {
-    const output = await exec('WMIC CPU Get NumberOfCores')
-    return output
-      .split(os.EOL)
-      .map((line: string) => parseInt(line))
-      .filter((value: number) => !isNaN(value))
-      .reduce((sum: number, number: number) => sum + number, 1)
-  } else {
-    const cores = os.cpus().filter((cpu: any, index: number) => {
-      const hasHyperthreading = cpu.model.includes('Intel')
-      const isOdd = index % 2 === 1
-      return !hasHyperthreading || isOdd
-    })
-    return cores.length
-  }
+  try {
+    if (platform === 'linux') {
+      const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
+      return parseInt(output.trim(), 10)
+    } else if (platform === 'darwin') {
+      const output = await exec('sysctl -n hw.physicalcpu_max')
+      return parseInt(output.trim(), 10)
+    } else if (platform === 'win32') {
+      const output = await exec('WMIC CPU Get NumberOfCores')
+      return output
+        .split(os.EOL)
+        .map((line: string) => parseInt(line))
+        .filter((value: number) => !isNaN(value))
+        .reduce((sum: number, number: number) => sum + number, 1)
+    } else {
+      const cores = os.cpus().filter((cpu: any, index: number) => {
+        const hasHyperthreading = cpu.model.includes('Intel')
+        const isOdd = index % 2 === 1
+        return !hasHyperthreading || isOdd
+      })
+      return cores.length
+    }
+  } catch (err) {
+    console.warn('Failed to get physical CPU count', err)
+    // Divide by 2 to get rid of hyper threading
+    const coreCount = Math.ceil(os.cpus().length / 2)
+    console.debug('Using node API to get physical CPU count:', coreCount)
+    return coreCount
+  }
 }
 
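
The new catch branch falls back to Node's own CPU report and halves it, so a machine exposing 16 logical CPUs is treated as having 8 physical cores. A standalone sketch of just that fallback arithmetic (variable names are illustrative):

import os from 'os'

// If the platform-specific command fails, assume SMT/hyper-threading and
// estimate physical cores as half the logical count, rounded up.
const logicalCpus = os.cpus().length                  // e.g. 16 logical threads
const estimatedPhysical = Math.ceil(logicalCpus / 2)  // -> 8
console.debug('Estimated physical CPU count:', estimatedPhysical)
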
@@ -1,6 +1,6 @@
 import { SystemResourceInfo } from '../../types'
 import { physicalCpuCount } from './config'
-import { log, logServer } from './log'
+import { log } from './log'
 
 export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
   const cpu = await physicalCpuCount()
@@ -38,7 +38,7 @@ export default class JanModelExtension extends ModelExtension {
   private static readonly _tensorRtEngineFormat = '.engine'
   private static readonly _configDirName = 'config'
   private static readonly _defaultModelFileName = 'default-model.json'
-  private static readonly _supportedGpuArch = ['turing', 'ampere', 'ada']
+  private static readonly _supportedGpuArch = ['ampere', 'ada']
 
   /**
    * Called when the extension is loaded.
@@ -181,8 +181,7 @@ const updateNvidiaDriverInfo = async () =>
 const getGpuArch = (gpuName: string): string => {
   if (!gpuName.toLowerCase().includes('nvidia')) return 'unknown'
 
-  if (gpuName.includes('20')) return 'turing'
-  else if (gpuName.includes('30')) return 'ampere'
+  if (gpuName.includes('30')) return 'ampere'
   else if (gpuName.includes('40')) return 'ada'
   else return 'unknown'
 }
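
With Turing dropped from getGpuArch, only 30-series and 40-series names map to a supported architecture. A quick illustration, using hypothetical GPU name strings:

getGpuArch('NVIDIA GeForce RTX 3090') // 'ampere'
getGpuArch('NVIDIA GeForce RTX 4090') // 'ada'
getGpuArch('NVIDIA GeForce RTX 2080') // 'unknown' (20-series / Turing is no longer matched)
getGpuArch('AMD Radeon RX 7900 XT')   // 'unknown' (non-NVIDIA names short-circuit)
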
@@ -3,27 +3,31 @@
     "sources": [
       {
         "filename": "config.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/config.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/config.json"
       },
       {
-        "filename": "rank0.engine",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/rank0.engine"
+        "filename": "mistral_float16_tp1_rank0.engine",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine"
       },
       {
         "filename": "tokenizer.model",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/tokenizer.model"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer.model"
       },
       {
         "filename": "special_tokens_map.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/special_tokens_map.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/special_tokens_map.json"
       },
       {
         "filename": "tokenizer.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/tokenizer.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer.json"
       },
       {
         "filename": "tokenizer_config.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/LlamaCorn-1.1B-Chat-fp16/tokenizer_config.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer_config.json"
+      },
+      {
+        "filename": "model.cache",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/model.cache"
       }
     ],
     "id": "llamacorn-1.1b-chat-fp16",
@@ -50,27 +54,31 @@
     "sources": [
       {
         "filename": "config.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/config.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/config.json"
       },
       {
-        "filename": "rank0.engine",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/rank0.engine"
+        "filename": "mistral_float16_tp1_rank0.engine",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine"
       },
       {
         "filename": "tokenizer.model",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/tokenizer.model"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.model"
       },
       {
         "filename": "special_tokens_map.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/special_tokens_map.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/special_tokens_map.json"
       },
       {
         "filename": "tokenizer.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/tokenizer.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.json"
       },
       {
         "filename": "tokenizer_config.json",
-        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/TinyJensen-1.1B-Chat-fp16/tokenizer_config.json"
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer_config.json"
+      },
+      {
+        "filename": "model.cache",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/model.cache"
       }
     ],
     "id": "tinyjensen-1.1b-chat-fp16",
@@ -92,5 +100,57 @@
       "size": 2151000000
     },
     "engine": "nitro-tensorrt-llm"
+  },
+  {
+    "sources": [
+      {
+        "filename": "config.json",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/config.json"
+      },
+      {
+        "filename": "mistral_float16_tp1_rank0.engine",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/mistral_float16_tp1_rank0.engine"
+      },
+      {
+        "filename": "tokenizer.model",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer.model"
+      },
+      {
+        "filename": "special_tokens_map.json",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/special_tokens_map.json"
+      },
+      {
+        "filename": "tokenizer.json",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer.json"
+      },
+      {
+        "filename": "tokenizer_config.json",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer_config.json"
+      },
+      {
+        "filename": "model.cache",
+        "url": "https://delta.jan.ai/dist/models/<gpuarch>/<os>/tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/model.cache"
+      }
+    ],
+    "id": "mistral-7b-instruct-int4",
+    "object": "model",
+    "name": "Mistral 7B Instruct v0.1 INT4",
+    "version": "1.0",
+    "description": "Mistral 7B Instruct v0.1 INT4",
+    "format": "TensorRT-LLM",
+    "settings": {
+      "ctx_len": 2048,
+      "text_model": false,
+      "prompt_template": "[INST] {prompt} [/INST]"
+    },
+    "parameters": {
+      "max_tokens": 4096
+    },
+    "metadata": {
+      "author": "MistralAI",
+      "tags": ["TensorRT-LLM", "7B", "Finetuned"],
+      "size": 3840000000
+    },
+    "engine": "nitro-tensorrt-llm"
   }
 ]
@@ -18,7 +18,7 @@
       "0.1.0"
     ]
   },
-  "tensorrtVersion": "0.1.6",
+  "tensorrtVersion": "0.1.8",
   "provider": "nitro-tensorrt-llm",
   "scripts": {
     "build": "tsc --module commonjs && rollup -c rollup.config.ts",
@@ -21,7 +21,7 @@ export default [
     DOWNLOAD_RUNNER_URL:
       process.platform === 'win32'
         ? JSON.stringify(
-            'https://github.com/janhq/nitro-tensorrt-llm/releases/download/windows-v<version>/nitro-windows-v<version>-amd64-tensorrt-llm-<gpuarch>.tar.gz'
+            'https://github.com/janhq/nitro-tensorrt-llm/releases/download/windows-v<version>-tensorrt-llm-v0.7.1/nitro-windows-v<version>-tensorrt-llm-v0.7.1-amd64-all-arch.tar.gz'
           )
         : JSON.stringify(
             'https://github.com/janhq/nitro-tensorrt-llm/releases/download/linux-v<version>/nitro-linux-v<version>-amd64-tensorrt-llm-<gpuarch>.tar.gz'
@@ -39,8 +39,9 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
   override inferenceUrl = INFERENCE_URL
   override nodeModule = NODE
 
-  private supportedGpuArch = ['turing', 'ampere', 'ada']
+  private supportedGpuArch = ['ampere', 'ada']
   private supportedPlatform = ['win32', 'linux']
+  private isUpdateAvailable = false
 
   compatibility() {
     return COMPATIBILITY as unknown as Compatibility
@@ -56,6 +57,8 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
   }
 
   override async install(): Promise<void> {
+    await this.removePopulatedModels()
+
     const info = await systemInformation()
     console.debug(
       `TensorRTLLMExtension installing pre-requisites... ${JSON.stringify(info)}`
@@ -141,6 +144,22 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
     events.on(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess)
   }
 
+  async removePopulatedModels(): Promise<void> {
+    console.debug(`removePopulatedModels`, JSON.stringify(models))
+    const janDataFolderPath = await getJanDataFolderPath()
+    const modelFolderPath = await joinPath([janDataFolderPath, 'models'])
+
+    for (const model of models) {
+      const modelPath = await joinPath([modelFolderPath, model.id])
+      console.debug(`modelPath: ${modelPath}`)
+      if (await fs.existsSync(modelPath)) {
+        console.debug(`Removing model ${modelPath}`)
+        await fs.rmdirSync(modelPath)
+      }
+    }
+    events.emit(ModelEvent.OnModelsUpdate, {})
+  }
+
   async onModelInit(model: Model): Promise<void> {
     if (model.engine !== this.provider) return
 
@@ -156,6 +175,10 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
     }
   }
 
+  updatable() {
+    return this.isUpdateAvailable
+  }
+
   override async installationState(): Promise<InstallationState> {
     const info = await systemInformation()
 
@@ -5,12 +5,13 @@ import fetchRT from 'fetch-retry'
 import { log, getJanDataFolderPath } from '@janhq/core/node'
 import decompress from 'decompress'
 import { SystemInformation } from '@janhq/core'
+import { PromptTemplate } from '@janhq/core'
 
 // Polyfill fetch with retry
 const fetchRetry = fetchRT(fetch)
 
 const supportedPlatform = (): string[] => ['win32', 'linux']
-const supportedGpuArch = (): string[] => ['turing', 'ampere', 'ada']
+const supportedGpuArch = (): string[] => ['ampere', 'ada']
 
 /**
  * The response object for model init operation.
@@ -35,9 +36,21 @@ async function loadModel(
   // e.g. ~/jan/models/llama-2
   let modelFolder = params.modelFolder
 
+  if (params.model.settings.prompt_template) {
+    const promptTemplate = params.model.settings.prompt_template
+    const prompt = promptTemplateConverter(promptTemplate)
+    if (prompt?.error) {
+      return Promise.reject(prompt.error)
+    }
+    params.model.settings.system_prompt = prompt.system_prompt
+    params.model.settings.user_prompt = prompt.user_prompt
+    params.model.settings.ai_prompt = prompt.ai_prompt
+  }
+
   const settings: ModelLoadParams = {
     engine_path: modelFolder,
     ctx_len: params.model.settings.ctx_len ?? 2048,
+    ...params.model.settings,
   }
   if (!systemInfo) {
     throw new Error('Cannot get system info. Unable to start nitro x tensorrt.')
@@ -220,6 +233,52 @@ const decompressRunner = async (zipPath: string, output: string) => {
   }
 }
 
+/**
+ * Parse prompt template into args settings
+ * @param promptTemplate Template as string
+ * @returns
+ */
+function promptTemplateConverter(promptTemplate: string): PromptTemplate {
+  // Split the string using the markers
+  const systemMarker = '{system_message}'
+  const promptMarker = '{prompt}'
+
+  if (
+    promptTemplate.includes(systemMarker) &&
+    promptTemplate.includes(promptMarker)
+  ) {
+    // Find the indices of the markers
+    const systemIndex = promptTemplate.indexOf(systemMarker)
+    const promptIndex = promptTemplate.indexOf(promptMarker)
+
+    // Extract the parts of the string
+    const system_prompt = promptTemplate.substring(0, systemIndex)
+    const user_prompt = promptTemplate.substring(
+      systemIndex + systemMarker.length,
+      promptIndex
+    )
+    const ai_prompt = promptTemplate.substring(
+      promptIndex + promptMarker.length
+    )
+
+    // Return the split parts
+    return { system_prompt, user_prompt, ai_prompt }
+  } else if (promptTemplate.includes(promptMarker)) {
+    // Extract the parts of the string for the case where only promptMarker is present
+    const promptIndex = promptTemplate.indexOf(promptMarker)
+    const user_prompt = promptTemplate.substring(0, promptIndex)
+    const ai_prompt = promptTemplate.substring(
+      promptIndex + promptMarker.length
+    )
+
+    // Return the split parts
+    return { user_prompt, ai_prompt }
+  }
+
+  // Return an error if none of the conditions are met
+  return { error: 'Cannot split prompt template' }
+}
+
 export default {
   supportedPlatform,
   supportedGpuArch,
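
To make the conversion concrete, this is roughly what promptTemplateConverter returns for the Mistral template added to the model list above, and for a hypothetical template that carries both markers:

// Only {prompt} present (the Mistral template shipped in this PR):
promptTemplateConverter('[INST] {prompt} [/INST]')
// => { user_prompt: '[INST] ', ai_prompt: ' [/INST]' }

// Hypothetical template containing both {system_message} and {prompt}:
promptTemplateConverter('<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>\n')
// => { system_prompt: '<|system|>\n', user_prompt: '<|user|>\n', ai_prompt: '<|assistant|>\n' }
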
@@ -1,6 +1,6 @@
 'use client'
 
-import { PropsWithChildren, useEffect, useState } from 'react'
+import { PropsWithChildren, useCallback, useEffect, useState } from 'react'
 
 import { Toaster } from 'react-hot-toast'
 
@@ -37,7 +37,7 @@ const Providers = (props: PropsWithChildren) => {
   const [activated, setActivated] = useState(false)
   const [settingUp, setSettingUp] = useState(false)
 
-  async function setupExtensions() {
+  const setupExtensions = useCallback(async () => {
     // Register all active extensions
     await extensionManager.registerActive()
 
@@ -57,7 +57,7 @@ const Providers = (props: PropsWithChildren) => {
       setSettingUp(false)
       setActivated(true)
     }, 500)
-  }
+  }, [pathname])
 
   // Services Setup
   useEffect(() => {
@@ -78,7 +78,7 @@ const Providers = (props: PropsWithChildren) => {
         setActivated(true)
       }
     }
-  }, [setupCore])
+  }, [setupCore, setupExtensions])
 
   return (
     <JotaiWrapper>
@@ -102,7 +102,6 @@ export default function useSendChatMessage() {
       console.error('No active thread')
       return
     }
-    setIsGeneratingResponse(true)
     updateThreadWaiting(activeThreadRef.current.id, true)
     const messages: ChatCompletionMessage[] = [
       activeThreadRef.current.assistants[0]?.instructions,
@@ -148,7 +147,7 @@ export default function useSendChatMessage() {
       await waitForModelStarting(modelId)
       setQueuedMessage(false)
     }
-
+    setIsGeneratingResponse(true)
     if (currentMessage.role !== ChatCompletionRole.User) {
       // Delete last response before regenerating
       deleteMessage(currentMessage.id ?? '')
@@ -171,7 +170,6 @@ export default function useSendChatMessage() {
       console.error('No active thread')
       return
     }
-    setIsGeneratingResponse(true)
 
     if (engineParamsUpdate) setReloadModel(true)
 
@@ -361,7 +359,7 @@ export default function useSendChatMessage() {
       await waitForModelStarting(modelId)
       setQueuedMessage(false)
     }
-
+    setIsGeneratingResponse(true)
     events.emit(MessageEvent.OnMessageSent, messageRequest)
 
     setReloadModel(false)
@@ -70,11 +70,6 @@ export const useSettings = () => {
       }
     }
     await fs.writeFileSync(settingsFile, JSON.stringify(settings))
-
-    // Relaunch to apply settings
-    if (vulkan != null) {
-      window.location.reload()
-    }
   }
 
   return {
@@ -90,12 +90,38 @@ const Advanced = () => {
     [setPartialProxy, setProxy]
   )
 
-  const updateQuickAskEnabled = async (e: boolean) => {
+  const updateQuickAskEnabled = async (
+    e: boolean,
+    relaunch: boolean = true
+  ) => {
     const appConfiguration: AppConfiguration =
       await window.core?.api?.getAppConfigurations()
     appConfiguration.quick_ask = e
     await window.core?.api?.updateAppConfiguration(appConfiguration)
-    window.core?.api?.relaunch()
+    if (relaunch) window.core?.api?.relaunch()
+  }
+
+  const updateVulkanEnabled = async (e: boolean, relaunch: boolean = true) => {
+    toaster({
+      title: 'Reload',
+      description: 'Vulkan settings updated. Reload now to apply the changes.',
+    })
+    stopModel()
+    setVulkanEnabled(e)
+    await saveSettings({ vulkan: e, gpusInUse: [] })
+    // Relaunch to apply settings
+    if (relaunch) window.location.reload()
+  }
+
+  const updateExperimentalEnabled = async (e: boolean) => {
+    setExperimentalEnabled(e)
+    if (e) return
+
+    // It affects other settings, so we need to reset them
+    const isRelaunch = quickAskEnabled || vulkanEnabled
+    if (quickAskEnabled) await updateQuickAskEnabled(false, false)
+    if (vulkanEnabled) await updateVulkanEnabled(false, false)
+    if (isRelaunch) window.core?.api?.relaunch()
   }
 
   useEffect(() => {
@@ -179,7 +205,7 @@ const Advanced = () => {
           </div>
           <Switch
             checked={experimentalEnabled}
-            onCheckedChange={setExperimentalEnabled}
+            onCheckedChange={updateExperimentalEnabled}
           />
         </div>
 
@@ -381,16 +407,7 @@ const Advanced = () => {
 
             <Switch
               checked={vulkanEnabled}
-              onCheckedChange={(e) => {
-                toaster({
-                  title: 'Reload',
-                  description:
-                    'Vulkan settings updated. Reload now to apply the changes.',
-                })
-                stopModel()
-                saveSettings({ vulkan: e, gpusInUse: [] })
-                setVulkanEnabled(e)
-              }}
+              onCheckedChange={(e) => updateVulkanEnabled(e)}
             />
           </div>
         )}
@@ -23,6 +23,8 @@ import { useAtomValue } from 'jotai'
 
 import { Marked, Renderer } from 'marked'
 
+import UpdateExtensionModal from './UpdateExtensionModal'
+
 import { extensionManager } from '@/extension'
 import Extension from '@/extension/Extension'
 import { installingExtensionAtom } from '@/helpers/atoms/Extension.atom'
@@ -39,7 +41,7 @@ const TensorRtExtensionItem: React.FC<Props> = ({ item }) => {
     useState<InstallationState>('NotRequired')
   const installingExtensions = useAtomValue(installingExtensionAtom)
   const [isGpuSupported, setIsGpuSupported] = useState<boolean>(false)
+  const [promptUpdateModal, setPromptUpdateModal] = useState<boolean>(false)
   const isInstalling = installingExtensions.some(
     (e) => e.extensionId === item.name
   )
@@ -69,7 +71,7 @@ const TensorRtExtensionItem: React.FC<Props> = ({ item }) => {
       return
     }
 
-    const supportedGpuArch = ['turing', 'ampere', 'ada']
+    const supportedGpuArch = ['ampere', 'ada']
     setIsGpuSupported(supportedGpuArch.includes(arch))
   }
   getSystemInfos()
@@ -138,6 +140,7 @@ const TensorRtExtensionItem: React.FC<Props> = ({ item }) => {
           installProgress={progress}
           installState={installState}
           onInstallClick={onInstallClick}
+          onUpdateClick={() => setPromptUpdateModal(true)}
           onCancelClick={onCancelInstallingClick}
         />
       </div>
@@ -177,6 +180,9 @@ const TensorRtExtensionItem: React.FC<Props> = ({ item }) => {
           </div>
         </div>
       )}
+      {promptUpdateModal && (
+        <UpdateExtensionModal onUpdateClick={onInstallClick} />
+      )}
     </div>
   )
 }
@@ -185,6 +191,7 @@ type InstallStateProps = {
   installProgress: number
   installState: InstallationState
   onInstallClick: () => void
+  onUpdateClick: () => void
  onCancelClick: () => void
 }
 
@@ -192,6 +199,7 @@ const InstallStateIndicator: React.FC<InstallStateProps> = ({
   installProgress,
   installState,
   onInstallClick,
+  onUpdateClick,
   onCancelClick,
 }) => {
   if (installProgress !== -1) {
@@ -218,6 +226,12 @@ const InstallStateIndicator: React.FC<InstallStateProps> = ({
           Installed
         </div>
       )
+    case 'Updatable':
+      return (
+        <Button themes="secondaryBlue" size="sm" onClick={onUpdateClick}>
+          Update
+        </Button>
+      )
     case 'NotInstalled':
       return (
         <Button themes="secondaryBlue" size="sm" onClick={onInstallClick}>
web/screens/Settings/CoreExtensions/UpdateExtensionModal.tsx (new file, 58 lines)
@@ -0,0 +1,58 @@
+import React from 'react'
+
+import {
+  Button,
+  Modal,
+  ModalClose,
+  ModalContent,
+  ModalFooter,
+  ModalHeader,
+  ModalPortal,
+  ModalTitle,
+  ModalTrigger,
+} from '@janhq/uikit'
+import { Paintbrush } from 'lucide-react'
+
+type Props = {
+  onUpdateClick: () => void
+}
+
+const UpdateExtensionModal: React.FC<Props> = ({ onUpdateClick }) => {
+  return (
+    <Modal>
+      <ModalTrigger asChild onClick={(e) => e.stopPropagation()}>
+        <div className="flex cursor-pointer items-center space-x-2 px-4 py-2 hover:bg-secondary">
+          <Paintbrush size={16} className="text-muted-foreground" />
+          <span className="text-bold text-black dark:text-muted-foreground">
+            Update extension
+          </span>
+        </div>
+      </ModalTrigger>
+      <ModalPortal />
+      <ModalContent>
+        <ModalHeader>
+          <ModalTitle>Clean Thread</ModalTitle>
+        </ModalHeader>
+        <p>
+          Updating this extension may result in the loss of any custom models or
+          data associated with the current version. We recommend backing up any
+          important data before proceeding with the update.
+        </p>
+        <ModalFooter>
+          <div className="flex gap-x-2">
+            <ModalClose asChild onClick={(e) => e.stopPropagation()}>
+              <Button themes="ghost">No</Button>
+            </ModalClose>
+            <ModalClose asChild>
+              <Button themes="danger" onClick={onUpdateClick} autoFocus>
+                Yes
+              </Button>
+            </ModalClose>
+          </div>
+        </ModalFooter>
+      </ModalContent>
+    </Modal>
+  )
+}
+
+export default React.memo(UpdateExtensionModal)