diff --git a/README.md b/README.md
index bc5918cb9..035126faa 100644
--- a/README.md
+++ b/README.md
@@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
jan.exe
Intel
M1/M2
jan.deb
jan.AppImage
diff --git a/core/src/node/api/processors/download.ts b/core/src/node/api/processors/download.ts
index bff6f47f0..4ddeff160 100644
--- a/core/src/node/api/processors/download.ts
+++ b/core/src/node/api/processors/download.ts
@@ -42,6 +42,24 @@ export class Downloader implements Processor {
// Downloading file to a temp file first
const downloadingTempFile = `${destination}.download`
+ // adding initial download state
+ const initialDownloadState: DownloadState = {
+ modelId,
+ fileName,
+ time: {
+ elapsed: 0,
+ remaining: 0,
+ },
+ speed: 0,
+ percent: 0,
+ size: {
+ total: 0,
+ transferred: 0,
+ },
+ downloadState: 'downloading',
+ }
+ DownloadManager.instance.downloadProgressMap[modelId] = initialDownloadState
+
progress(rq, {})
.on('progress', (state: any) => {
const downloadState: DownloadState = {
diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml
index 76906acf3..633c9abc6 100644
--- a/docs/openapi/jan.yaml
+++ b/docs/openapi/jan.yaml
@@ -93,6 +93,110 @@ paths:
"temperature": 0.7,
"top_p": 0.95
}'
+ - lang: JavaScript
+ source: |-
+ const data = {
+ messages: [
+ {
+ content: 'You are a helpful assistant.',
+ role: 'system'
+ },
+ {
+ content: 'Hello!',
+ role: 'user'
+ }
+ ],
+ model: 'tinyllama-1.1b',
+ stream: true,
+ max_tokens: 2048,
+ stop: ['hello'],
+ frequency_penalty: 0,
+ presence_penalty: 0,
+ temperature: 0.7,
+ top_p: 0.95
+ };
+
+ fetch('http://localhost:1337/v1/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'
+ },
+ body: JSON.stringify(data)
+ })
+ .then(response => response.json())
+ .then(data => console.log(data));
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ const data = {
+ messages: [
+ {
+ content: 'You are a helpful assistant.',
+ role: 'system'
+ },
+ {
+ content: 'Hello!',
+ role: 'user'
+ }
+ ],
+ model: 'tinyllama-1.1b',
+ stream: true,
+ max_tokens: 2048,
+ stop: ['hello'],
+ frequency_penalty: 0,
+ presence_penalty: 0,
+ temperature: 0.7,
+ top_p: 0.95
+ };
+
+ fetch('http://localhost:1337/v1/chat/completions', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'
+ },
+ body: JSON.stringify(data)
+ })
+ .then(response => response.json())
+ .then(data => console.log(data));
+ - lang: Python
+ source: >-
+ import requests
+
+ import json
+
+
+ data = {
+ "messages": [
+ {
+ "content": "You are a helpful assistant.",
+ "role": "system"
+ },
+ {
+ "content": "Hello!",
+ "role": "user"
+ }
+ ],
+ "model": "tinyllama-1.1b",
+ "stream": true,
+ "max_tokens": 2048,
+ "stop": [
+ "hello"
+ ],
+ "frequency_penalty": 0,
+ "presence_penalty": 0,
+ "temperature": 0.7,
+ "top_p": 0.95
+ }
+
+
+ response =
+ requests.post('http://localhost:1337/v1/chat/completions',
+ json=data)
+
+ print(response.json())
/models:
get:
operationId: listModels
@@ -113,10 +217,38 @@ paths:
$ref: specs/models.yaml#/components/schemas/ListModelsResponse
x-codeSamples:
- lang: cURL
- source: |
+ source: |-
curl -X 'GET' \
'http://localhost:1337/v1/models' \
-H 'accept: application/json'
+ - lang: JavaScript
+ source: |-
+ const response = await fetch('http://localhost:1337/v1/models', {
+ method: 'GET',
+ headers: {Accept: 'application/json'}
+ });
+ const data = await response.json();
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/models'
+ headers = {'Accept': 'application/json'}
+ response = requests.get(url, headers=headers)
+ data = response.json()
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ const url = 'http://localhost:1337/v1/models';
+ const options = {
+ method: 'GET',
+ headers: { Accept: 'application/json' }
+ };
+
+ fetch(url, options)
+ .then(res => res.json())
+ .then(json => console.log(json));
/models/download/{model_id}:
get:
operationId: downloadModel
@@ -143,10 +275,40 @@ paths:
$ref: specs/models.yaml#/components/schemas/DownloadModelResponse
x-codeSamples:
- lang: cURL
- source: |
- curl -X 'GET' \
- 'http://localhost:1337/v1/models/download/{model_id}' \
+ source: |-
+ curl -X 'GET' \
+ 'http://localhost:1337/v1/models/download/{model_id}' \
-H 'accept: application/json'
+ - lang: JavaScript
+ source: >-
+ const response = await
+ fetch('http://localhost:1337/v1/models/download/{model_id}', {
+ method: 'GET',
+ headers: {accept: 'application/json'}
+ });
+
+
+ const data = await response.json();
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/models/download/{model_id}', {
+ method: 'GET',
+ headers: {accept: 'application/json'}
+ })
+ .then(res => res.json())
+ .then(data => console.log(data));
+ - lang: Python
+ source: >-
+ import requests
+
+
+ response =
+ requests.get('http://localhost:1337/v1/models/download/{model_id}',
+ headers={'accept': 'application/json'})
+
+ data = response.json()
/models/{model_id}:
get:
operationId: retrieveModel
@@ -176,10 +338,47 @@ paths:
$ref: specs/models.yaml#/components/schemas/GetModelResponse
x-codeSamples:
- lang: cURL
- source: |
- curl -X 'GET' \
- 'http://localhost:1337/v1/models/{model_id}' \
+ source: |-
+ curl -X 'GET' \
+ 'http://localhost:1337/v1/models/{model_id}' \
-H 'accept: application/json'
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ const modelId = 'mistral-ins-7b-q4';
+
+ fetch(`http://localhost:1337/v1/models/${modelId}`, {
+ method: 'GET',
+ headers: {'accept': 'application/json'}
+ })
+ .then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ const modelId = 'mistral-ins-7b-q4';
+
+ fetch(`http://localhost:1337/v1/models/${modelId}`, {
+ method: 'GET',
+ headers: {'accept': 'application/json'}
+ })
+ .then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Python
+ source: >-
+ import requests
+
+
+ model_id = 'mistral-ins-7b-q4'
+
+
+ response =
+ requests.get(f'http://localhost:1337/v1/models/{model_id}',
+ headers={'accept': 'application/json'})
+
+ print(response.json())
delete:
operationId: deleteModel
tags:
@@ -207,10 +406,45 @@ paths:
$ref: specs/models.yaml#/components/schemas/DeleteModelResponse
x-codeSamples:
- lang: cURL
- source: |
+ source: |-
curl -X 'DELETE' \
'http://localhost:1337/v1/models/{model_id}' \
-H 'accept: application/json'
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ const modelId = 'mistral-ins-7b-q4';
+
+ fetch(`http://localhost:1337/v1/models/${modelId}`, {
+ method: 'DELETE',
+ headers: { 'accept': 'application/json' }
+ })
+ .then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ const modelId = 'mistral-ins-7b-q4';
+
+ fetch(`http://localhost:1337/v1/models/${modelId}`, {
+ method: 'DELETE',
+ headers: { 'accept': 'application/json' }
+ })
+ .then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Python
+ source: >-
+ import requests
+
+
+ model_id = 'mistral-ins-7b-q4'
+
+
+ response =
+ requests.delete(f'http://localhost:1337/v1/models/{model_id}',
+ headers={'accept': 'application/json'})
/threads:
post:
operationId: createThread
@@ -237,8 +471,8 @@ paths:
x-codeSamples:
- lang: cURL
source: |
- curl -X POST http://localhost:1337/v1/threads \
- -H "Content-Type: application/json" \
+ curl -X POST http://localhost:1337/v1/threads \
+ -H "Content-Type: application/json" \
-d '{
"messages": [{
"role": "user",
@@ -249,6 +483,73 @@ paths:
"content": "How does AI work? Explain it in simple terms."
}]
}'
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ messages: [
+ {
+ role: 'user',
+ content: 'Hello, what is AI?',
+ file_ids: ['file-abc123']
+ },
+ {
+ role: 'user',
+ content: 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ })
+ });
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({
+ messages: [
+ {
+ role: 'user',
+ content: 'Hello, what is AI?',
+ file_ids: ['file-abc123']
+ },
+ {
+ role: 'user',
+ content: 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ })
+ });
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/threads'
+ payload = {
+ 'messages': [
+ {
+ 'role': 'user',
+ 'content': 'Hello, what is AI?',
+ 'file_ids': ['file-abc123']
+ },
+ {
+ 'role': 'user',
+ 'content': 'How does AI work? Explain it in simple terms.'
+ }
+ ]
+ }
+
+ response = requests.post(url, json=payload)
+ print(response.text)
get:
operationId: listThreads
tags:
@@ -282,9 +583,36 @@ paths:
metadata: {}
x-codeSamples:
- lang: cURL
- source: |
- curl http://localhost:1337/v1/threads \
- -H "Content-Type: application/json" \
+ source: |-
+ curl http://localhost:1337/v1/threads \
+ -H "Content-Type: application/json"
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'GET',
+ headers: {'Content-Type': 'application/json'}
+ }).then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ fetch('http://localhost:1337/v1/threads', {
+ method: 'GET',
+ headers: {'Content-Type': 'application/json'}
+ }).then(res => res.json())
+ .then(json => console.log(json));
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/threads'
+ headers = {'Content-Type': 'application/json'}
+
+ response = requests.get(url, headers=headers)
+ print(response.json())
/threads/{thread_id}:
get:
operationId: getThread
diff --git a/uikit/src/main.scss b/uikit/src/main.scss
index f3294e12e..e31b53c68 100644
--- a/uikit/src/main.scss
+++ b/uikit/src/main.scss
@@ -42,69 +42,10 @@
--danger: 346.8 77.2% 49.8%;
--danger-foreground: 355.7 100% 97.3%;
+ --secondary: 60 4.8% 95.9%;
+ --secondary-foreground: 24 9.8% 10%;
+
--border: 20 5.9% 90%;
--input: 20 5.9% 90%;
--ring: 20 14.3% 4.1%;
-
- .primary-blue {
- --primary: 221 83% 53%;
- --primary-foreground: 210 40% 98%;
-
- --secondary: 60 4.8% 95.9%;
- --secondary-foreground: 24 9.8% 10%;
- }
-
- .primary-green {
- --primary: 142.1 76.2% 36.3%;
- --primary-foreground: 355.7 100% 97.3%;
-
- --secondary: 240 4.8% 95.9%;
- --secondary-foreground: 240 5.9% 10%;
- }
-
- .primary-purple {
- --primary: 262.1 83.3% 57.8%;
- --primary-foreground: 210 20% 98%;
-
- --secondary: 220 14.3% 95.9%;
- --secondary-foreground: 220.9 39.3% 11%;
- }
-}
-
-.dark {
- --background: 20 14.3% 4.1%;
- --foreground: 60 9.1% 97.8%;
-
- --muted: 12 6.5% 15.1%;
- --muted-foreground: 24 5.4% 63.9%;
-
- --danger: 346.8 77.2% 49.8%;
- --danger-foreground: 355.7 100% 97.3%;
-
- --border: 12 6.5% 15.1%;
- --input: 12 6.5% 15.1%;
- --ring: 35.5 91.7% 32.9%;
-
- .primary-blue {
- --primary: 221 83% 53%;
- --primary-foreground: 222.2 47.4% 11.2%;
-
- --secondary: 12 6.5% 15.1%;
- --secondary-foreground: 60 9.1% 97.8%;
- }
-
- .primary-green {
- --primary: 142.1 70.6% 45.3%;
- --primary-foreground: 144.9 80.4% 10%;
- --secondary: 240 3.7% 15.9%;
- --secondary-foreground: 0 0% 98%;
- }
-
- .primary-purple {
- --primary: 263.4 70% 50.4%;
- --primary-foreground: 210 20% 98%;
-
- --secondary: 215 27.9% 16.9%;
- --secondary-foreground: 210 20% 98%;
- }
}