jan/core/src/node/helper/config.ts
Latest commit 489e8aab24 (Louis, 2024-03-19): Sync release 0.4.9 to dev (#2407). Squashed changes:

* fix: move tensorrt executable to engine (#2400)
* chore: bump tensorrt version
* fix: wrong destroy path
* fix: install extensions in parallel
* chore: update path for tensorrt engine (#2404)
* Release/v0.4.9 (#2421):
  * fix: turn off experimental settings should also turn off quick ask (#2411)
  * fix: app glitches 1s generating response before starting model (#2412)
  * fix: disable experimental feature should also disable vulkan (#2414)
  * fix: model load stuck on windows when can't get CPU core count (#2413)
  * feat: TensorRT-LLM engine update support (#2415)
  * Tensorrt LLM disable turing support (#2418)
  * chore: add prompt template tensorrtllm (#2375)
  * fix: correct tensorrt mistral model.json (#2419)

165 lines · 5.0 KiB · TypeScript

import { AppConfiguration } from '../../types'
import { join } from 'path'
import fs from 'fs'
import os from 'os'
import childProcess from 'child_process'

const configurationFileName = 'settings.json'

// TODO: do not specify app name in framework module
const defaultJanDataFolder = join(os.homedir(), 'jan')
const defaultAppConfig: AppConfiguration = {
  data_folder: defaultJanDataFolder,
  quick_ask: false,
}
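
/*
 * For illustration only: with the defaults above, a freshly created
 * settings.json contains something like the following (the data_folder
 * value depends on the user's home directory):
 *
 *   { "data_folder": "/home/alice/jan", "quick_ask": false }
 */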
/**
 * Gets the app configurations.
 *
 * @returns {AppConfiguration} The app configurations.
 */
export const getAppConfigurations = (): AppConfiguration => {
  // Retrieve Application Support folder path
  // Fallback to user home directory if not found
  const configurationFile = getConfigurationFilePath()

  if (!fs.existsSync(configurationFile)) {
    // create default app config if we don't have one
    console.debug(`App config not found, creating default config at ${configurationFile}`)
    fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig))
    return defaultAppConfig
  }

  try {
    const appConfigurations: AppConfiguration = JSON.parse(
      fs.readFileSync(configurationFile, 'utf-8')
    )
    return appConfigurations
  } catch (err) {
    console.error(`Failed to read app config, returning default config instead. Err: ${err}`)
    return defaultAppConfig
  }
}
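
/*
 * Minimal usage sketch (illustrative, not part of the module): callers can
 * read the configuration without checking whether the file exists first,
 * since a missing or unreadable file falls back to the defaults.
 *
 *   const config = getAppConfigurations()
 *   console.log(config.data_folder) // e.g. /home/alice/jan on first run
 */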
// Resolve the configuration file path: prefer the app path when available,
// otherwise fall back to the user's home directory.
const getConfigurationFilePath = () =>
  join(
    global.core?.appPath() || process.env[process.platform === 'win32' ? 'USERPROFILE' : 'HOME'],
    configurationFileName
  )
/**
 * Persists the given app configurations to the configuration file.
 *
 * @param configuration The app configurations to save.
 */
export const updateAppConfiguration = (configuration: AppConfiguration): Promise<void> => {
  const configurationFile = getConfigurationFilePath()
  console.debug('updateAppConfiguration, configurationFile: ', configurationFile)

  fs.writeFileSync(configurationFile, JSON.stringify(configuration))
  return Promise.resolve()
}
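
/*
 * Illustrative read-modify-write sketch for toggling a setting. Awaiting the
 * returned promise keeps callers correct even though the current
 * implementation writes synchronously.
 *
 *   const config = getAppConfigurations()
 *   await updateAppConfiguration({ ...config, quick_ask: !config.quick_ask })
 */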
/**
 * Utility function to get the data folder path.
 *
 * @returns {string} The data folder path.
 */
export const getJanDataFolderPath = (): string => {
  const appConfigurations = getAppConfigurations()
  return appConfigurations.data_folder
}
/**
 * Utility function to get the extensions path.
 *
 * @returns {string} The extensions path.
 */
export const getJanExtensionsPath = (): string => {
  const appConfigurations = getAppConfigurations()
  return join(appConfigurations.data_folder, 'extensions')
}
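
/*
 * For illustration: both helpers derive from the same data_folder setting,
 * so with data_folder set to /home/alice/jan:
 *
 *   getJanDataFolderPath()  // /home/alice/jan
 *   getJanExtensionsPath()  // /home/alice/jan/extensions
 */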
/**
 * Utility function to get the physical CPU count.
 *
 * @returns {number} The physical CPU count.
 */
export const physicalCpuCount = async (): Promise<number> => {
  const platform = os.platform()
  try {
    if (platform === 'linux') {
      // Count unique physical cores in lscpu's parseable output
      const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
      return parseInt(output.trim(), 10)
    } else if (platform === 'darwin') {
      const output = await exec('sysctl -n hw.physicalcpu_max')
      return parseInt(output.trim(), 10)
    } else if (platform === 'win32') {
      // Sum the per-package core counts; the header line and blank lines
      // parse to NaN and are filtered out before summing from zero
      const output = await exec('WMIC CPU Get NumberOfCores')
      return output
        .split(os.EOL)
        .map((line: string) => parseInt(line, 10))
        .filter((value: number) => !isNaN(value))
        .reduce((sum: number, number: number) => sum + number, 0)
    } else {
      // Heuristic fallback: assume Intel CPUs expose two logical cores per
      // physical core (hyper-threading), so count every other logical core
      const cores = os.cpus().filter((cpu: any, index: number) => {
        const hasHyperthreading = cpu.model.includes('Intel')
        const isOdd = index % 2 === 1
        return !hasHyperthreading || isOdd
      })
      return cores.length
    }
  } catch (err) {
    console.warn('Failed to get physical CPU count', err)
    // Divide by 2 to get rid of hyper threading
    const coreCount = Math.ceil(os.cpus().length / 2)
    console.debug('Using node API to get physical CPU count:', coreCount)
    return coreCount
  }
}
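
/*
 * Usage sketch (hypothetical caller, not part of this module): sizing an
 * inference thread pool from the physical core count. The nThreads name is
 * an illustrative assumption.
 *
 *   const nThreads = Math.max(1, await physicalCpuCount())
 */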
// Thin promise wrapper around child_process.exec that resolves with stdout.
const exec = async (command: string): Promise<string> => {
  return new Promise((resolve, reject) => {
    childProcess.exec(command, { encoding: 'utf8' }, (error, stdout) => {
      if (error) {
        reject(error)
      } else {
        resolve(stdout)
      }
    })
  })
}
/**
 * Utility function to get the engine configuration. Only the remote
 * `openai` and `groq` engines are backed by a configuration file under
 * the data folder's `engines` directory.
 *
 * @returns The parsed engine configuration, or undefined for other engines.
 */
export const getEngineConfiguration = async (engineId: string) => {
  if (engineId !== 'openai' && engineId !== 'groq') {
    return undefined
  }

  const directoryPath = join(getJanDataFolderPath(), 'engines')
  const filePath = join(directoryPath, `${engineId}.json`)
  const data = fs.readFileSync(filePath, 'utf-8')
  return JSON.parse(data)
}
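
/*
 * Illustrative sketch: reading a remote engine configuration. The api_key
 * field is an assumption about the JSON shape, for illustration only.
 *
 *   const engine = await getEngineConfiguration('openai')
 *   const apiKey = engine?.api_key
 *   await getEngineConfiguration('anything-else') // undefined
 */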
/**
 * Utility function to get the server log path.
 *
 * @returns {string} The log path.
 */
export const getServerLogPath = (): string => {
  const appConfigurations = getAppConfigurations()
  const logFolderPath = join(appConfigurations.data_folder, 'logs')
  if (!fs.existsSync(logFolderPath)) {
    fs.mkdirSync(logFolderPath, { recursive: true })
  }
  return join(logFolderPath, 'server.log')
}
/**
 * Utility function to get the app log path.
 *
 * @returns {string} The log path.
 */
export const getAppLogPath = (): string => {
  const appConfigurations = getAppConfigurations()
  const logFolderPath = join(appConfigurations.data_folder, 'logs')
  if (!fs.existsSync(logFolderPath)) {
    fs.mkdirSync(logFolderPath, { recursive: true })
  }
  return join(logFolderPath, 'app.log')
}
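
/*
 * Usage sketch: both log helpers create the logs folder under the data
 * folder on demand, so a caller can append immediately (illustrative, not
 * part of the module):
 *
 *   fs.appendFileSync(getAppLogPath(), 'app started\n')
 *   fs.appendFileSync(getServerLogPath(), 'server started\n')
 */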