Feat: Remote API Parameters Correction (#2802)
* fix: change to gpt4 turbo
* add: params
* fix: change to gpt 3.5 turbo
* delete: redundant
* fix: correct description
* version bump
* add: params
* fix: version bump
* delete: deprecated
* add: params
* add: new model
* chore: version bump
* fix: version correct
* add: params
* fix: version bump
* fix: llama2 no longer supported
* fix: reverse mistral api
* fix: add params
* fix: mistral api redundant params
* fix: typo
* fix: typo
* fix: correct context length
* fix: remove stop

Co-authored-by: Van Pham <64197333+Van-QA@users.noreply.github.com>
parent c21bc08793
commit 092a572684
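
Taken together, the diffs below standardize each remote model's `parameters` block. As a reading aid (a reconstruction from the changed lines, not a schema defined anywhere in this commit), the corrected shape for the Groq and OpenAI entries is:

```json
{
  "max_tokens": 8192,
  "temperature": 0.7,
  "top_p": 0.95,
  "stream": true,
  "stop": [],
  "frequency_penalty": 0,
  "presence_penalty": 0
}
```

`max_tokens` is set per model to its actual context length (for example 32768 for mixtral-8x7b-32768), while the Mistral entries keep only `max_tokens`, `temperature`, `top_p`, and `stream`, matching the `fix: mistral api redundant params` and `fix: remove stop` items in the commit message.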

@janhq/inference-groq-extension package.json:

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-groq-extension",
   "productName": "Groq Inference Engine",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "This extension enables fast Groq chat completion API calls",
   "main": "dist/index.js",
   "module": "dist/module.js",

Groq model definitions:

@@ -8,18 +8,18 @@
     "id": "llama3-70b-8192",
     "object": "model",
     "name": "Groq Llama 3 70b",
-    "version": "1.0",
+    "version": "1.1",
     "description": "Groq Llama 3 70b with supercharged speed!",
     "format": "api",
-    "settings": {
-      "text_model": false
-    },
+    "settings": {},
     "parameters": {
       "max_tokens": 8192,
       "temperature": 0.7,
-      "top_p": 1,
-      "stop": null,
-      "stream": true
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "Meta",
@@ -36,18 +36,18 @@
     "id": "llama3-8b-8192",
     "object": "model",
     "name": "Groq Llama 3 8b",
-    "version": "1.0",
+    "version": "1.1",
     "description": "Groq Llama 3 8b with supercharged speed!",
     "format": "api",
-    "settings": {
-      "text_model": false
-    },
+    "settings": {},
     "parameters": {
       "max_tokens": 8192,
       "temperature": 0.7,
-      "top_p": 1,
-      "stop": null,
-      "stream": true
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "Meta",
@@ -64,18 +64,18 @@
     "id": "gemma-7b-it",
     "object": "model",
     "name": "Groq Gemma 7b Instruct",
-    "version": "1.0",
+    "version": "1.1",
     "description": "Groq Gemma 7b Instruct with supercharged speed!",
     "format": "api",
-    "settings": {
-      "text_model": false
-    },
+    "settings": {},
     "parameters": {
-      "max_tokens": 4096,
+      "max_tokens": 8192,
       "temperature": 0.7,
-      "top_p": 1,
-      "stop": null,
-      "stream": true
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "Google",
@@ -83,34 +83,6 @@
     },
     "engine": "groq"
   },
-  {
-    "sources": [
-      {
-        "url": "https://groq.com"
-      }
-    ],
-    "id": "llama2-70b-4096",
-    "object": "model",
-    "name": "Groq Llama 2 70b",
-    "version": "1.0",
-    "description": "Groq Llama 2 70b with supercharged speed!",
-    "format": "api",
-    "settings": {
-      "text_model": false
-    },
-    "parameters": {
-      "max_tokens": 4096,
-      "temperature": 0.7,
-      "top_p": 1,
-      "stop": null,
-      "stream": true
-    },
-    "metadata": {
-      "author": "Meta",
-      "tags": ["General", "Big Context Length"]
-    },
-    "engine": "groq"
-  },
   {
     "sources": [
       {
@@ -120,18 +92,18 @@
     "id": "mixtral-8x7b-32768",
     "object": "model",
     "name": "Groq Mixtral 8x7b Instruct",
-    "version": "1.0",
+    "version": "1.1",
     "description": "Groq Mixtral 8x7b Instruct is Mixtral with supercharged speed!",
     "format": "api",
-    "settings": {
-      "text_model": false
-    },
+    "settings": {},
     "parameters": {
-      "max_tokens": 4096,
+      "max_tokens": 32768,
       "temperature": 0.7,
-      "top_p": 1,
-      "stop": null,
-      "stream": true
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "Mistral",
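
Applying those hunks, the first Groq entry ends up as follows. This is a sketch reassembled from the diff, trimmed to the fields the hunks actually show (the unchanged `sources` and `metadata` blocks sit outside the hunks and are omitted here):

```json
{
  "id": "llama3-70b-8192",
  "object": "model",
  "name": "Groq Llama 3 70b",
  "version": "1.1",
  "description": "Groq Llama 3 70b with supercharged speed!",
  "format": "api",
  "settings": {},
  "parameters": {
    "max_tokens": 8192,
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "stop": [],
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "engine": "groq"
}
```

The `settings` object is emptied (previously `{ "text_model": false }`), matching the `delete: redundant` item in the commit message.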

@janhq/inference-mistral-extension package.json:

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-mistral-extension",
   "productName": "MistralAI Inference Engine",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "This extension enables Mistral chat completion API calls",
   "main": "dist/index.js",
   "module": "dist/module.js",

Mistral model definitions:

@@ -8,48 +8,20 @@
     "id": "mistral-small-latest",
     "object": "model",
     "name": "Mistral Small",
-    "version": "1.0",
-    "description": "Mistral Small is the ideal choice for simpe tasks that one can do in builk - like Classification, Customer Support, or Text Generation. It offers excellent performance at an affordable price point.",
+    "version": "1.1",
+    "description": "Mistral Small is the ideal choice for simple tasks (Classification, Customer Support, or Text Generation) at an affordable price.",
     "format": "api",
     "settings": {},
     "parameters": {
-      "max_tokens": 4096,
-      "temperature": 0.7
+      "max_tokens": 32000,
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true
     },
     "metadata": {
       "author": "Mistral",
       "tags": [
-        "Classification",
-        "Customer Support",
-        "Text Generation"
-      ]
-    },
-    "engine": "mistral"
-  },
-  {
-    "sources": [
-      {
-        "url": "https://docs.mistral.ai/api/"
-      }
-    ],
-    "id": "mistral-medium-latest",
-    "object": "model",
-    "name": "Mistral Medium",
-    "version": "1.0",
-    "description": "Mistral Medium is the ideal for intermediate tasks that require moderate reasoning - like Data extraction, Summarizing a Document, Writing a Job Description, or Writing Product Descriptions. Mistral Medium strikes a balance between performance and capability, making it suitable for a wide range of tasks that only require language transformaion",
-    "format": "api",
-    "settings": {},
-    "parameters": {
-      "max_tokens": 4096,
-      "temperature": 0.7
-    },
-    "metadata": {
-      "author": "Mistral",
-      "tags": [
-        "Data extraction",
-        "Summarizing a Document",
-        "Writing a Job Description",
-        "Writing Product Descriptions"
+        "General"
       ]
     },
     "engine": "mistral"
@@ -63,21 +35,47 @@
     "id": "mistral-large-latest",
     "object": "model",
     "name": "Mistral Large",
-    "version": "1.0",
-    "description": "Mistral Large is ideal for complex tasks that require large reasoning capabilities or are highly specialized - like Synthetic Text Generation, Code Generation, RAG, or Agents.",
+    "version": "1.1",
+    "description": "Mistral Large is ideal for complex tasks (Synthetic Text Generation, Code Generation, RAG, or Agents).",
     "format": "api",
     "settings": {},
     "parameters": {
-      "max_tokens": 4096,
-      "temperature": 0.7
+      "max_tokens": 32000,
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true
     },
     "metadata": {
       "author": "Mistral",
       "tags": [
-        "Text Generation",
-        "Code Generation",
-        "RAG",
-        "Agents"
+        "General"
       ]
     },
     "engine": "mistral"
+  },
+  {
+    "sources": [
+      {
+        "url": "https://docs.mistral.ai/api/"
+      }
+    ],
+    "id": "open-mixtral-8x22b",
+    "object": "model",
+    "name": "Mixtral 8x22B",
+    "version": "1.1",
+    "description": "Mixtral 8x22B is a high-performance, cost-effective model designed for complex tasks.",
+    "format": "api",
+    "settings": {},
+    "parameters": {
+      "max_tokens": 32000,
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true
+    },
+    "metadata": {
+      "author": "Mistral",
+      "tags": [
+        "General"
+      ]
+    },
+    "engine": "mistral"
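
The `add: new model` item in the commit message corresponds to the open-mixtral-8x22b entry introduced above. Assembled from the `+` lines alone, the new entry reads:

```json
{
  "sources": [
    {
      "url": "https://docs.mistral.ai/api/"
    }
  ],
  "id": "open-mixtral-8x22b",
  "object": "model",
  "name": "Mixtral 8x22B",
  "version": "1.1",
  "description": "Mixtral 8x22B is a high-performance, cost-effective model designed for complex tasks.",
  "format": "api",
  "settings": {},
  "parameters": {
    "max_tokens": 32000,
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true
  },
  "metadata": {
    "author": "Mistral",
    "tags": ["General"]
  },
  "engine": "mistral"
}
```

Note that the Mistral entries carry no `stop`, `frequency_penalty`, or `presence_penalty`, in line with the `fix: mistral api redundant params` and `fix: remove stop` items.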

@janhq/inference-openai-extension package.json:

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-openai-extension",
   "productName": "OpenAI Inference Engine",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "This extension enables OpenAI chat completion API calls",
   "main": "dist/index.js",
   "module": "dist/module.js",

OpenAI model definitions:

@@ -5,20 +5,25 @@
         "url": "https://openai.com"
       }
     ],
-    "id": "gpt-4",
+    "id": "gpt-4-turbo",
     "object": "model",
     "name": "OpenAI GPT 4",
-    "version": "1.0",
+    "version": "1.1",
     "description": "OpenAI GPT 4 model is extremely good",
     "format": "api",
     "settings": {},
     "parameters": {
       "max_tokens": 4096,
-      "temperature": 0.7
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "OpenAI",
-      "tags": ["General", "Big Context Length"]
+      "tags": ["General"]
     },
     "engine": "openai"
   },
@@ -31,8 +36,8 @@
     "id": "gpt-4-vision-preview",
     "object": "model",
     "name": "OpenAI GPT 4 with Vision (Preview)",
-    "version": "1.0",
-    "description": "OpenAI GPT 4 with Vision model is extremely good in preview",
+    "version": "1.1",
+    "description": "OpenAI GPT-4 Vision model features vision understanding capabilities",
     "format": "api",
     "settings": {
       "vision_model": true,
@@ -40,34 +45,13 @@
     },
     "parameters": {
       "max_tokens": 4096,
-      "temperature": 0.7
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true
     },
     "metadata": {
       "author": "OpenAI",
-      "tags": ["General", "Big Context Length", "Vision"]
-    },
-    "engine": "openai"
-  },
-  {
-    "sources": [
-      {
-        "url": "https://openai.com"
-      }
-    ],
-    "id": "gpt-3.5-turbo-16k-0613",
-    "object": "model",
-    "name": "OpenAI GPT 3.5 Turbo 16k 0613",
-    "version": "1.0",
-    "description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
-    "format": "api",
-    "settings": {},
-    "parameters": {
-      "max_tokens": 4096,
-      "temperature": 0.7
-    },
-    "metadata": {
-      "author": "OpenAI",
-      "tags": ["General", "Big Context Length"]
+      "tags": ["General", "Vision"]
     },
     "engine": "openai"
   },
@@ -80,17 +64,22 @@
     "id": "gpt-3.5-turbo",
     "object": "model",
     "name": "OpenAI GPT 3.5 Turbo",
-    "version": "1.0",
-    "description": "OpenAI GPT 3.5 Turbo model is extremely good",
+    "version": "1.1",
+    "description": "OpenAI GPT 3.5 Turbo model is extremely fast",
     "format": "api",
     "settings": {},
     "parameters": {
       "max_tokens": 4096,
-      "temperature": 0.7
+      "temperature": 0.7,
+      "top_p": 0.95,
+      "stream": true,
+      "stop": [],
+      "frequency_penalty": 0,
+      "presence_penalty": 0
     },
     "metadata": {
       "author": "OpenAI",
-      "tags": ["General", "Big Context Length"]
+      "tags": ["General"]
     },
     "engine": "openai"
   }
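
For reference, here is the first OpenAI entry after this commit, reassembled from the context and `+` lines of the hunk at the top of the file; the id moves from gpt-4 to gpt-4-turbo while the display name stays "OpenAI GPT 4":

```json
{
  "sources": [
    {
      "url": "https://openai.com"
    }
  ],
  "id": "gpt-4-turbo",
  "object": "model",
  "name": "OpenAI GPT 4",
  "version": "1.1",
  "description": "OpenAI GPT 4 model is extremely good",
  "format": "api",
  "settings": {},
  "parameters": {
    "max_tokens": 4096,
    "temperature": 0.7,
    "top_p": 0.95,
    "stream": true,
    "stop": [],
    "frequency_penalty": 0,
    "presence_penalty": 0
  },
  "metadata": {
    "author": "OpenAI",
    "tags": ["General"]
  },
  "engine": "openai"
}
```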