Chore: model hub v0.5.1 update (#3036)

* init model

* init qwen2

* version bump

* refactor: correct icon

* chore: Refactor/issue template feature request (#3037)

* refactor: add issue template form for bug

* refactor: config blank_issues_enabled: false

* refactor: config feature request

* refactor: config feature request

---------

Co-authored-by: Van-QA <van@jan.ai>

* refactor: correct icon

* refactor: allow blank issue

---------

Co-authored-by: Van-QA <van@jan.ai>
Co-authored-by: Van Pham <64197333+Van-QA@users.noreply.github.com>
This commit is contained in:
Hoang Ha 2024-06-13 15:06:07 +07:00 committed by GitHub
parent 34ea178f5b
commit f702506e58
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 45 additions and 6 deletions

View File

@@ -1,5 +1,5 @@
## To encourage contributors to use issue templates, we don't allow blank issues
blank_issues_enabled: false
blank_issues_enabled: true
contact_links:
- name: "\u2753 Our GitHub Discussions page"

View File

@@ -1,5 +1,5 @@
---
name: "\U0001F56E Documentation request"
name: "📖 Documentation request"
about: Documentation requests
title: 'docs: TITLE'
labels: 'type: documentation'

View File

@@ -1,7 +1,7 @@
---
name: Epic request
name: "💥 Epic request"
about: Suggest an idea for this project
title: '\U0001F4A5 epic: [DESCRIPTION]'
title: 'epic: [DESCRIPTION]'
labels: 'type: epic'
assignees: ''

View File

@@ -1,7 +1,7 @@
{
"name": "@janhq/inference-cortex-extension",
"productName": "Cortex Inference Engine",
"version": "1.0.11",
"version": "1.0.12",
"description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
"main": "dist/index.js",
"node": "dist/node/index.cjs.js",

View File

@@ -0,0 +1,36 @@
{
"sources": [
{
"filename": "Qwen2-7B-Instruct-Q4_K_M.gguf",
"url": "https://huggingface.co/bartowski/Qwen2-7B-Instruct-GGUF/resolve/main/Qwen2-7B-Instruct-Q4_K_M.gguf"
}
],
"id": "qwen2-7b",
"object": "model",
"name": "Qwen 2 Instruct 7B Q4",
"version": "1.0",
"description": "Qwen is optimized at Chinese, ideal for everyday tasks.",
"format": "gguf",
"settings": {
"ctx_len": 32768,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "Qwen2-7B-Instruct-Q4_K_M.gguf",
"ngl": 28
},
"parameters": {
"temperature": 0.7,
"top_p": 0.95,
"stream": true,
"max_tokens": 32768,
"stop": [],
"frequency_penalty": 0,
"presence_penalty": 0
},
"metadata": {
"author": "Alibaba",
"tags": ["7B", "Finetuned"],
"size": 4680000000
},
"engine": "nitro"
}

View File

@@ -39,6 +39,8 @@ const aya8bJson = require('./resources/models/aya-23-8b/model.json')
const aya35bJson = require('./resources/models/aya-23-35b/model.json')
const phimediumJson = require('./resources/models/phi3-medium/model.json')
const codestralJson = require('./resources/models/codestral-22b/model.json')
const qwen2Json = require('./resources/models/qwen2-7b/model.json')
export default [
{
@@ -84,7 +86,8 @@ export default [
phimediumJson,
aya8bJson,
aya35bJson,
codestralJson
codestralJson,
qwen2Json
]),
NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson),