chore: extension should register its own models (#2601)

* chore: extension should register its own models

Signed-off-by: James <james@jan.ai>

---------

Signed-off-by: James <james@jan.ai>
Co-authored-by: James <james@jan.ai>
This commit is contained in:
NamH 2024-04-05 14:18:58 +07:00 committed by GitHub
parent 089e311bfd
commit e0d6049d66
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
69 changed files with 311 additions and 284 deletions

View File

@ -41,7 +41,6 @@ COPY --from=builder /app/pre-install ./pre-install/
# Copy the package.json, yarn.lock, and output of web yarn space to leverage Docker cache
COPY --from=builder /app/uikit ./uikit/
COPY --from=builder /app/web ./web/
COPY --from=builder /app/models ./models/
RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build
RUN yarn workspace @janhq/web install

View File

@ -65,7 +65,6 @@ COPY --from=builder /app/pre-install ./pre-install/
# Copy the package.json, yarn.lock, and output of web yarn space to leverage Docker cache
COPY --from=builder /app/uikit ./uikit/
COPY --from=builder /app/web ./web/
COPY --from=builder /app/models ./models/
RUN yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build
RUN yarn workspace @janhq/web install

View File

@ -30,6 +30,7 @@ export default [
// which external modules to include in the bundle
// https://github.com/rollup/rollup-plugin-node-resolve#usage
replace({
'preventAssignment': true,
'node:crypto': 'crypto',
'delimiters': ['"', '"'],
}),

View File

@ -10,6 +10,8 @@ import { EngineManager } from './EngineManager'
* Applicable to all AI Engines
*/
export abstract class AIEngine extends BaseExtension {
private static modelsFolder = 'models'
// The inference engine
abstract provider: string
@ -21,15 +23,6 @@ export abstract class AIEngine extends BaseExtension {
events.on(ModelEvent.OnModelInit, (model: Model) => this.loadModel(model))
events.on(ModelEvent.OnModelStop, (model: Model) => this.unloadModel(model))
this.prePopulateModels()
}
/**
* Defines models
*/
models(): Promise<Model[]> {
return Promise.resolve([])
}
/**
@ -39,6 +32,49 @@ export abstract class AIEngine extends BaseExtension {
EngineManager.instance().register(this)
}
/**
 * Persists the given models into the Jan data folder so they show up in the Hub.
 * For each model: if its folder already exists, delegate to
 * migrateModelIfNeeded; otherwise create the folder and write model.json.
 * Emits a single ModelEvent.OnModelsUpdate after the loop when at least one
 * new model was written, so the UI refreshes its model list once.
 */
async registerModels(models: Model[]): Promise<void> {
  const dataFolder = await getJanDataFolderPath()
  const baseFolder = await joinPath([dataFolder, AIEngine.modelsFolder])

  let newModelWritten = false
  for (const model of models) {
    const modelFolder = await joinPath([baseFolder, model.id])

    if (await fs.existsSync(modelFolder)) {
      // Folder already present — only refresh model.json if the version changed.
      await this.migrateModelIfNeeded(model, modelFolder)
    } else {
      await fs.mkdir(modelFolder)
      const jsonPath = await joinPath([modelFolder, 'model.json'])
      await fs.writeFileSync(jsonPath, JSON.stringify(model, null, 2))
      newModelWritten = true
    }
  }

  if (newModelWritten) {
    events.emit(ModelEvent.OnModelsUpdate, {})
  }
}
/**
 * Overwrites the persisted model.json when the extension ships a different
 * model version than the one already on disk, then notifies listeners via
 * ModelEvent.OnModelsUpdate so the UI can refresh.
 *
 * Best-effort: a missing or malformed model.json must not break engine
 * loading, so any failure is only logged as a warning.
 *
 * @param model - The model definition bundled with the extension.
 * @param modelPath - Absolute path to the model's folder in the data dir.
 */
async migrateModelIfNeeded(model: Model, modelPath: string): Promise<void> {
  try {
    // Hoist the shared model.json path instead of computing it twice.
    const modelJsonPath = await joinPath([modelPath, 'model.json'])
    const modelJson = await fs.readFileSync(modelJsonPath, 'utf-8')
    const currentModel: Model = JSON.parse(modelJson)
    if (currentModel.version !== model.version) {
      await fs.writeFileSync(modelJsonPath, JSON.stringify(model, null, 2))
      events.emit(ModelEvent.OnModelsUpdate, {})
    }
  } catch (error) {
    // Fixed garbled log message ("Error while try to migrating model").
    console.warn('Error while trying to migrate model', error)
  }
}
/**
* Loads the model.
*/
@ -65,40 +101,4 @@ export abstract class AIEngine extends BaseExtension {
* Stop inference
*/
stopInference() {}
/**
* Pre-populate models to App Data Folder
*/
prePopulateModels(): Promise<void> {
const modelFolder = 'models'
return this.models().then((models) => {
const prePoluateOperations = models.map((model) =>
getJanDataFolderPath()
.then((janDataFolder) =>
// Attempt to create the model folder
joinPath([janDataFolder, modelFolder, model.id]).then((path) =>
fs
.mkdir(path)
.catch()
.then(() => path)
)
)
.then((path) => joinPath([path, 'model.json']))
.then((path) => {
// Do not overwite existing model.json
return fs.existsSync(path).then((exist: any) => {
if (!exist) return fs.writeFileSync(path, JSON.stringify(model, null, 2))
})
})
.catch((e: Error) => {
console.error('Error', e)
})
)
Promise.all(prePoluateOperations).then(() =>
// Emit event to update models
// So the UI can update the models list
events.emit(ModelEvent.OnModelsUpdate, {})
)
})
}
}

View File

@ -41,7 +41,7 @@ export type Model = {
/**
* The version of the model.
*/
version: number
version: string
/**
* The format of the model.

View File

@ -14,14 +14,12 @@
"renderer/**/*",
"build/**/*.{js,map}",
"pre-install",
"models/**/*",
"docs/**/*",
"scripts/**/*",
"icons/**/*"
],
"asarUnpack": [
"pre-install",
"models",
"docs",
"scripts",
"icons"

View File

@ -18,8 +18,8 @@ export default [
},
plugins: [
replace({
preventAssignment: true,
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
EXTENSION_NAME: JSON.stringify(packageJson.name),
VERSION: JSON.stringify(packageJson.version),
}),
// Allow json resolution

View File

@ -1,3 +1,2 @@
declare const NODE: string
declare const EXTENSION_NAME: string
declare const VERSION: string

View File

@ -21,7 +21,7 @@ export default class JanAssistantExtension extends AssistantExtension {
JanAssistantExtension._homeDir
)
if (
localStorage.getItem(`${EXTENSION_NAME}-version`) !== VERSION ||
localStorage.getItem(`${this.name}-version`) !== VERSION ||
!assistantDirExist
) {
if (!assistantDirExist) await fs.mkdir(JanAssistantExtension._homeDir)
@ -29,7 +29,7 @@ export default class JanAssistantExtension extends AssistantExtension {
// Write assistant metadata
await this.createJanAssistant()
// Finished migration
localStorage.setItem(`${EXTENSION_NAME}-version`, VERSION)
localStorage.setItem(`${this.name}-version`, VERSION)
// Update the assistant list
events.emit(AssistantEvent.OnAssistantsUpdate, {})
}

View File

@ -18,7 +18,7 @@ export default [
},
plugins: [
replace({
EXTENSION_NAME: JSON.stringify(packageJson.name),
preventAssignment: true,
NODE_MODULE_PATH: JSON.stringify(
`${packageJson.name}/${packageJson.node}`
),

View File

@ -1,2 +1 @@
declare const EXTENSION_NAME: string
declare const NODE_MODULE_PATH: string

View File

@ -338,7 +338,7 @@ export default class JanHuggingFaceExtension extends HuggingFaceExtension {
const metadata: Model = {
object: 'model',
version: 1,
version: '1.0',
format: 'gguf',
sources: [
{

View File

@ -0,0 +1,58 @@
[
{
"sources": [
{
"url": "https://groq.com"
}
],
"id": "llama2-70b-4096",
"object": "model",
"name": "Groq Llama 2 70b",
"version": "1.0",
"description": "Groq Llama 2 70b with supercharged speed!",
"format": "api",
"settings": {
"text_model": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 1,
"stop": null,
"stream": true
},
"metadata": {
"author": "Meta",
"tags": ["General", "Big Context Length"]
},
"engine": "groq"
},
{
"sources": [
{
"url": "https://groq.com"
}
],
"id": "mixtral-8x7b-32768",
"object": "model",
"name": "Groq Mixtral 8x7b Instruct",
"version": "1.0",
"description": "Groq Mixtral 8x7b Instruct is Mixtral with supercharged speed!",
"format": "api",
"settings": {
"text_model": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 1,
"stop": null,
"stream": true
},
"metadata": {
"author": "Mistral",
"tags": ["General", "Big Context Length"]
},
"engine": "groq"
}
]

View File

@ -9,6 +9,8 @@
import { RemoteOAIEngine } from '@janhq/core'
declare const SETTINGS: Array<any>
declare const MODELS: Array<any>
enum Settings {
apiKey = 'groq-api-key',
chatCompletionsEndPoint = 'chat-completions-endpoint',
@ -27,6 +29,7 @@ export default class JanInferenceGroqExtension extends RemoteOAIEngine {
// Register Settings
this.registerSettings(SETTINGS)
this.registerModels(MODELS)
// Retrieve API Key Setting
this.apiKey = await this.getSetting<string>(Settings.apiKey, '')

View File

@ -2,6 +2,7 @@ const path = require('path')
const webpack = require('webpack')
const packageJson = require('./package.json')
const settingJson = require('./resources/settings.json')
const modelsJson = require('./resources/models.json')
module.exports = {
experiments: { outputModule: true },
@ -18,6 +19,7 @@ module.exports = {
},
plugins: [
new webpack.DefinePlugin({
MODELS: JSON.stringify(modelsJson),
SETTINGS: JSON.stringify(settingJson),
MODULE: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
}),

View File

@ -7,6 +7,36 @@ import replace from '@rollup/plugin-replace'
const packageJson = require('./package.json')
const defaultSettingJson = require('./resources/default_settings.json')
const bakllavaJson = require('./resources/models/bakllava-1/model.json')
const codeninja7bJson = require('./resources/models/codeninja-1.0-7b/model.json')
const commandr34bJson = require('./resources/models/command-r-34b/model.json')
const deepseekCoder13bJson = require('./resources/models/deepseek-coder-1.3b/model.json')
const deepseekCoder34bJson = require('./resources/models/deepseek-coder-34b/model.json')
const dolphinPhi2Json = require('./resources/models/dolphin-phi-2/model.json')
const gemma2bJson = require('./resources/models/gemma-2b/model.json')
const gemma7bJson = require('./resources/models/gemma-7b/model.json')
const hermesPro7bJson = require('./resources/models/hermes-pro-7b/model.json')
const llama2Chat70bJson = require('./resources/models/llama2-chat-70b/model.json')
const llama2Chat7bJson = require('./resources/models/llama2-chat-7b/model.json')
const llamacorn1bJson = require('./resources/models/llamacorn-1.1b/model.json')
const llava13bJson = require('./resources/models/llava-13b/model.json')
const llava7bJson = require('./resources/models/llava-7b/model.json')
const miqu70bJson = require('./resources/models/miqu-70b/model.json')
const mistralIns7bq4Json = require('./resources/models/mistral-ins-7b-q4/model.json')
const mixtral8x7bInstructJson = require('./resources/models/mixtral-8x7b-instruct/model.json')
const noromaid7bJson = require('./resources/models/noromaid-7b/model.json')
const openchat357bJson = require('./resources/models/openchat-3.5-7b/model.json')
const openhermesNeural7bJson = require('./resources/models/openhermes-neural-7b/model.json')
const phind34bJson = require('./resources/models/phind-34b/model.json')
const qwen7bJson = require('./resources/models/qwen-7b/model.json')
const stableZephyr3bJson = require('./resources/models/stable-zephyr-3b/model.json')
const stealthv127bJson = require('./resources/models/stealth-v1.2-7b/model.json')
const tinyllama11bJson = require('./resources/models/tinyllama-1.1b/model.json')
const trinityv127bJson = require('./resources/models/trinity-v1.2-7b/model.json')
const vistral7bJson = require('./resources/models/vistral-7b/model.json')
const wizardcoder13bJson = require('./resources/models/wizardcoder-13b/model.json')
const yi34bJson = require('./resources/models/yi-34b/model.json')
export default [
{
input: `src/index.ts`,
@ -18,7 +48,38 @@ export default [
},
plugins: [
replace({
EXTENSION_NAME: JSON.stringify(packageJson.name),
preventAssignment: true,
MODELS: JSON.stringify([
bakllavaJson,
codeninja7bJson,
commandr34bJson,
deepseekCoder13bJson,
deepseekCoder34bJson,
dolphinPhi2Json,
gemma2bJson,
gemma7bJson,
hermesPro7bJson,
llama2Chat70bJson,
llama2Chat7bJson,
llamacorn1bJson,
llava13bJson,
llava7bJson,
miqu70bJson,
mistralIns7bq4Json,
mixtral8x7bInstructJson,
noromaid7bJson,
openchat357bJson,
openhermesNeural7bJson,
phind34bJson,
qwen7bJson,
stableZephyr3bJson,
stealthv127bJson,
tinyllama11bJson,
trinityv127bJson,
vistral7bJson,
wizardcoder13bJson,
yi34bJson,
]),
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson),
INFERENCE_URL: JSON.stringify(

View File

@ -2,8 +2,8 @@ declare const NODE: string
declare const INFERENCE_URL: string
declare const TROUBLESHOOTING_URL: string
declare const JAN_SERVER_INFERENCE_URL: string
declare const EXTENSION_NAME: string
declare const DEFAULT_SETTINGS: Array<any>
declare const MODELS: Array<any>
/**
* The response from the initModel function.

View File

@ -23,10 +23,6 @@ export default class JanInferenceNitroExtension extends LocalOAIEngine {
nodeModule: string = NODE
provider: string = 'nitro'
models(): Promise<Model[]> {
return Promise.resolve([])
}
/**
* Checking the health for Nitro's process each 5 secs.
*/
@ -62,7 +58,8 @@ export default class JanInferenceNitroExtension extends LocalOAIEngine {
() => this.periodicallyGetNitroHealth(),
JanInferenceNitroExtension._intervalHealthCheck
)
const models = MODELS as unknown as Model[]
this.registerModels(models)
super.onLoad()
}

View File

@ -0,0 +1,97 @@
[
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4",
"object": "model",
"name": "OpenAI GPT 4",
"version": "1.0",
"description": "OpenAI GPT 4 model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4-vision-preview",
"object": "model",
"name": "OpenAI GPT 4 with Vision (Preview)",
"version": "1.0",
"description": "OpenAI GPT 4 with Vision model is extremely good in preview",
"format": "api",
"settings": {
"vision_model": true,
"textModel": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length", "Vision"]
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-3.5-turbo-16k-0613",
"object": "model",
"name": "OpenAI GPT 3.5 Turbo 16k 0613",
"version": "1.0",
"description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-3.5-turbo",
"object": "model",
"name": "OpenAI GPT 3.5 Turbo",
"version": "1.0",
"description": "OpenAI GPT 3.5 Turbo model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
}
]

View File

@ -9,10 +9,13 @@
import { RemoteOAIEngine } from '@janhq/core'
declare const SETTINGS: Array<any>
declare const MODELS: Array<any>
enum Settings {
apiKey = 'openai-api-key',
chatCompletionsEndPoint = 'chat-completions-endpoint',
}
/**
* A class that implements the InferenceExtension interface from the @janhq/core package.
* The class provides methods for initializing and stopping a model, and for making inference requests.
@ -27,6 +30,7 @@ export default class JanInferenceOpenAIExtension extends RemoteOAIEngine {
// Register Settings
this.registerSettings(SETTINGS)
this.registerModels(MODELS)
this.apiKey = await this.getSetting<string>(Settings.apiKey, '')
this.inferenceUrl = await this.getSetting<string>(

View File

@ -2,6 +2,7 @@ const path = require('path')
const webpack = require('webpack')
const packageJson = require('./package.json')
const settingJson = require('./resources/settings.json')
const modelsJson = require('./resources/models.json')
module.exports = {
experiments: { outputModule: true },
@ -18,6 +19,7 @@ module.exports = {
},
plugins: [
new webpack.DefinePlugin({
MODELS: JSON.stringify(modelsJson),
SETTINGS: JSON.stringify(settingJson),
ENGINE: JSON.stringify(packageJson.engine),
}),

View File

@ -1,6 +1,6 @@
{
"object": "model",
"version": 1,
"version": "1.0",
"format": "gguf",
"sources": [
{

View File

@ -5,6 +5,7 @@ import json from '@rollup/plugin-json'
import replace from '@rollup/plugin-replace'
const packageJson = require('./package.json')
const defaultModelJson = require('./resources/default-model.json')
export default [
{
@ -17,7 +18,8 @@ export default [
},
plugins: [
replace({
EXTENSION_NAME: JSON.stringify(packageJson.name),
preventAssignment: true,
DEFAULT_MODEL: JSON.stringify(defaultModelJson),
MODULE_PATH: JSON.stringify(
`${packageJson.name}/${packageJson.module}`
),

View File

@ -1,6 +1,6 @@
export {}
declare global {
declare const EXTENSION_NAME: string
declare const DEFAULT_MODEL: object
declare const MODULE_PATH: string
declare const VERSION: string

View File

@ -2,7 +2,6 @@ import {
fs,
downloadFile,
abortDownload,
getResourcePath,
InferenceEngine,
joinPath,
ModelExtension,
@ -11,7 +10,6 @@ import {
events,
DownloadEvent,
DownloadRoute,
ModelEvent,
DownloadState,
OptionType,
ImportingModel,
@ -36,8 +34,6 @@ export default class JanModelExtension extends ModelExtension {
InferenceEngine.nitro_tensorrt_llm,
]
private static readonly _tensorRtEngineFormat = '.engine'
private static readonly _configDirName = 'config'
private static readonly _defaultModelFileName = 'default-model.json'
private static readonly _supportedGpuArch = ['ampere', 'ada']
/**
@ -45,7 +41,6 @@ export default class JanModelExtension extends ModelExtension {
* @override
*/
async onLoad() {
this.copyModelsToHomeDir()
// Handle Desktop Events
this.handleDesktopEvents()
}
@ -56,37 +51,6 @@ export default class JanModelExtension extends ModelExtension {
*/
onUnload(): void {}
private async copyModelsToHomeDir() {
try {
// Check for migration conditions
if (
localStorage.getItem(`${EXTENSION_NAME}-version`) === VERSION &&
(await fs.existsSync(JanModelExtension._homeDir))
) {
// ignore if the there is no need to migrate
console.debug('Models already persisted.')
return
}
// copy models folder from resources to home directory
const resourePath = await getResourcePath()
const srcPath = await joinPath([resourePath, 'models'])
const janDataFolderPath = await getJanDataFolderPath()
const destPath = await joinPath([janDataFolderPath, 'models'])
await fs.syncFile(srcPath, destPath)
console.debug('Finished syncing models')
// Finished migration
localStorage.setItem(`${EXTENSION_NAME}-version`, VERSION)
events.emit(ModelEvent.OnModelsUpdate, {})
} catch (err) {
console.error(err)
}
}
/**
* Downloads a machine learning model.
* @param model - The model to download.
@ -489,20 +453,9 @@ export default class JanModelExtension extends ModelExtension {
return model
}
private async getDefaultModel(): Promise<Model | undefined> {
const defaultModelPath = await joinPath([
JanModelExtension._homeDir,
JanModelExtension._configDirName,
JanModelExtension._defaultModelFileName,
])
if (!(await fs.existsSync(defaultModelPath))) {
return undefined
}
const model = await this.readModelMetadata(defaultModelPath)
return typeof model === 'object' ? model : JSON.parse(model)
private async getDefaultModel(): Promise<Model> {
const defaultModel = DEFAULT_MODEL as Model
return defaultModel
}
/**

View File

@ -17,6 +17,7 @@ export default [
},
plugins: [
replace({
preventAssignment: true,
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
}),
// Allow json resolution

View File

@ -5,6 +5,7 @@ import typescript from 'rollup-plugin-typescript2'
import json from '@rollup/plugin-json'
import replace from '@rollup/plugin-replace'
const packageJson = require('./package.json')
const modelsJson = require('./resources/models.json')
export default [
{
@ -15,7 +16,8 @@ export default [
},
plugins: [
replace({
EXTENSION_NAME: JSON.stringify(packageJson.name),
preventAssignment: true,
MODELS: JSON.stringify(modelsJson),
TENSORRT_VERSION: JSON.stringify(packageJson.tensorrtVersion),
PROVIDER: JSON.stringify(packageJson.provider),
DOWNLOAD_RUNNER_URL:
@ -53,7 +55,7 @@ export default [
},
plugins: [
replace({
EXTENSION_NAME: JSON.stringify(packageJson.name),
preventAssignment: true,
TENSORRT_VERSION: JSON.stringify(packageJson.tensorrtVersion),
PROVIDER: JSON.stringify(packageJson.provider),
LOAD_MODEL_URL: JSON.stringify(

View File

@ -7,5 +7,5 @@ declare const ENGINE_PORT: string
declare const DOWNLOAD_RUNNER_URL: string
declare const TENSORRT_VERSION: string
declare const COMPATIBILITY: object
declare const EXTENSION_NAME: string
declare const PROVIDER: string
declare const MODELS: Array<any>

View File

@ -23,7 +23,6 @@ import {
ModelEvent,
getJanDataFolderPath,
} from '@janhq/core'
import models from '../models.json'
/**
* TensorRTLLMExtension - Implementation of LocalOAIEngine
@ -46,14 +45,14 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
override compatibility() {
return COMPATIBILITY as unknown as Compatibility
}
/**
* models implemented by the extension
* define pre-populated models
*/
override async models(): Promise<Model[]> {
if ((await this.installationState()) === 'Installed')
return models as unknown as Model[]
return []
override async onLoad(): Promise<void> {
super.onLoad()
if ((await this.installationState()) === 'Installed') {
const models = MODELS as unknown as Model[]
this.registerModels(models)
}
}
override async install(): Promise<void> {
@ -116,7 +115,7 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
const downloadRequest: DownloadRequest = {
url,
localPath: tarballFullPath,
extensionId: EXTENSION_NAME,
extensionId: this.name,
downloadType: 'extension',
}
downloadFile(downloadRequest)
@ -134,7 +133,8 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
events.emit(DownloadEvent.onFileUnzipSuccess, state)
// Prepopulate models as soon as it's ready
this.prePopulateModels().then(() => {
const models = MODELS as unknown as Model[]
this.registerModels(models).then(() => {
showToast(
'Extension installed successfully.',
'New models are added to Model Hub.'
@ -144,7 +144,8 @@ export default class TensorRTLLMExtension extends LocalOAIEngine {
events.on(DownloadEvent.onFileDownloadSuccess, onFileDownloadSuccess)
}
async removePopulatedModels(): Promise<void> {
private async removePopulatedModels(): Promise<void> {
const models = MODELS as unknown as Model[]
console.debug(`removePopulatedModels`, JSON.stringify(models))
const janDataFolderPath = await getJanDataFolderPath()
const modelFolderPath = await joinPath([janDataFolderPath, 'models'])

View File

@ -1,23 +0,0 @@
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-3.5-turbo-16k-0613",
"object": "model",
"name": "OpenAI GPT 3.5 Turbo 16k 0613",
"version": "1.0",
"description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
}

View File

@ -1,23 +0,0 @@
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-3.5-turbo",
"object": "model",
"name": "OpenAI GPT 3.5 Turbo",
"version": "1.0",
"description": "OpenAI GPT 3.5 Turbo model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
}

View File

@ -1,26 +0,0 @@
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4-vision-preview",
"object": "model",
"name": "OpenAI GPT 4 with Vision (Preview)",
"version": "1.0",
"description": "OpenAI GPT 4 with Vision model is extremely good in preview",
"format": "api",
"settings": {
"vision_model": true,
"textModel": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length", "Vision"]
},
"engine": "openai"
}

View File

@ -1,23 +0,0 @@
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4",
"object": "model",
"name": "OpenAI GPT 4",
"version": "1.0",
"description": "OpenAI GPT 4 model is extremely good",
"format": "api",
"settings": {},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7
},
"metadata": {
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
"engine": "openai"
}

View File

@ -1,28 +0,0 @@
{
"sources": [
{
"url": "https://groq.com"
}
],
"id": "llama2-70b-4096",
"object": "model",
"name": "Groq Llama 2 70b",
"version": "1.0",
"description": "Groq Llama 2 70b with supercharged speed!",
"format": "api",
"settings": {
"text_model": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 1,
"stop": null,
"stream": true
},
"metadata": {
"author": "Meta",
"tags": ["General", "Big Context Length"]
},
"engine": "groq"
}

View File

@ -1,28 +0,0 @@
{
"sources": [
{
"url": "https://groq.com"
}
],
"id": "mixtral-8x7b-32768",
"object": "model",
"name": "Groq Mixtral 8x7b Instruct",
"version": "1.0",
"description": "Groq Mixtral 8x7b Instruct is Mixtral with supercharged speed!",
"format": "api",
"settings": {
"text_model": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 1,
"stop": null,
"stream": true
},
"metadata": {
"author": "Mistral",
"tags": ["General", "Big Context Length"]
},
"engine": "groq"
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 229 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 356 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 352 KiB

View File

@ -26,7 +26,7 @@
"pre-install:linux": "find extensions -type f -path \"**/*.tgz\" -exec cp {} pre-install \\;",
"pre-install:win32": "powershell -Command \"Get-ChildItem -Path \"extensions\" -Recurse -File -Filter \"*.tgz\" | ForEach-Object { Copy-Item -Path $_.FullName -Destination \"pre-install\" }\"",
"pre-install": "run-script-os",
"copy:assets": "cpx \"models/**\" \"electron/models/\" && cpx \"pre-install/*.tgz\" \"electron/pre-install/\" && cpx \"docs/openapi/**\" \"electron/docs/openapi\"",
"copy:assets": "cpx \"pre-install/*.tgz\" \"electron/pre-install/\" && cpx \"docs/openapi/**\" \"electron/docs/openapi\"",
"dev:electron": "yarn copy:assets && yarn workspace jan dev",
"dev:web": "yarn workspace @janhq/web dev",
"dev:server": "yarn copy:assets && yarn workspace @janhq/server dev",