From d7bf98b68aed9e1c07f54c9015339373e01e93ed Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Tue, 27 Feb 2024 00:43:57 +0000 Subject: [PATCH 1/9] adds code samples for list models --- docs/openapi/jan.yaml | 119 ++++++++++++++++++++++++++---------------- 1 file changed, 74 insertions(+), 45 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index 864c80fdf..e58c6a4f2 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -1,11 +1,11 @@ ---- openapi: 3.0.0 info: title: API Reference description: > # Introduction - Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference). + Jan API is compatible with the [OpenAI + API](https://platform.openai.com/docs/api-reference). version: 0.1.8 contact: name: Jan Discord @@ -20,12 +20,12 @@ tags: description: List and describe the various models available in the API. - name: Chat description: > - Given a list of messages comprising a conversation, the model will - return a response. + Given a list of messages comprising a conversation, the model will return + a response. - name: Messages description: > - Messages capture a conversation's content. This can include the - content from LLM responses and other metadata from [chat + Messages capture a conversation's content. This can include the content + from LLM responses and other metadata from [chat completions](/specs/chats). - name: Threads - name: Assistants @@ -49,16 +49,16 @@ paths: summary: | Create chat completion description: > - Creates a model response for the given chat conversation. - Equivalent to OpenAI's create chat completion. + Creates a model response for the given chat conversation. Equivalent + to OpenAI's create chat completion. requestBody: content: application/json: schema: $ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest responses: - "200": + '200': description: OK content: application/json: @@ -100,12 +100,12 @@ paths: - Models summary: List models description: > - Lists the currently available models, and provides basic - information about each one such as the owner and availability. - Equivalent to OpenAI's list model. + Lists the currently available models, and provides basic information + about each one such as the owner and availability. Equivalent + to OpenAI's list model. responses: - "200": + '200': description: OK content: application/json: @@ -113,11 +113,39 @@ paths: $ref: specs/models.yaml#/components/schemas/ListModelsResponse x-codeSamples: - lang: cURL - source: | + source: |- curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - "/models/download/{model_id}": + - lang: JavaScript + source: |- + const response = await fetch('http://localhost:1337/v1/models', { + method: 'GET', + headers: {Accept: 'application/json'} + }); + const data = await response.json(); + - lang: Python + source: |- + import requests + + url = 'http://localhost:1337/v1/models' + headers = {'Accept': 'application/json'} + response = requests.get(url, headers=headers) + data = response.json() + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + const url = 'http://localhost:1337/v1/models'; + const options = { + method: 'GET', + headers: { Accept: 'application/json' } + }; + + fetch(url, options) + .then(res => res.json()) + .then(json => console.log(json)); + /models/download/{model_id}: get: operationId: downloadModel tags: @@ -135,7 +163,7 @@ paths: description: | The ID of the model to use for this request. 
responses: - "200": + '200': description: OK content: application/json: @@ -147,15 +175,15 @@ paths: curl -X 'GET' \ 'http://localhost:1337/v1/models/download/{model_id}' \ -H 'accept: application/json' - "/models/{model_id}": + /models/{model_id}: get: operationId: retrieveModel tags: - Models summary: Retrieve model description: > - Get a model instance, providing basic information about the model - such as the owner and permissioning. Equivalent to OpenAI's retrieve model. parameters: @@ -168,7 +196,7 @@ paths: description: | The ID of the model to use for this request. responses: - "200": + '200': description: OK content: application/json: @@ -199,7 +227,7 @@ paths: description: | The model id to delete responses: - "200": + '200': description: OK content: application/json: @@ -228,7 +256,7 @@ paths: schema: $ref: specs/threads.yaml#/components/schemas/CreateThreadObject responses: - "200": + '200': description: Thread created successfully content: application/json: @@ -257,7 +285,7 @@ paths: description: | Retrieves a list of all threads available in the system. responses: - "200": + '200': description: List of threads retrieved successfully content: application/json: @@ -285,7 +313,7 @@ paths: source: | curl http://localhost:1337/v1/threads \ -H "Content-Type: application/json" \ - "/threads/{thread_id}": + /threads/{thread_id}: get: operationId: getThread tags: @@ -305,7 +333,7 @@ paths: description: | The ID of the thread to retrieve. responses: - "200": + '200': description: Thread details retrieved successfully content: application/json: @@ -345,7 +373,7 @@ paths: items: $ref: specs/threads.yaml#/components/schemas/ThreadMessageObject responses: - "200": + '200': description: Thread modified successfully content: application/json: @@ -384,7 +412,7 @@ paths: description: | The ID of the thread to be deleted. responses: - "200": + '200': description: Thread deleted successfully content: application/json: @@ -405,7 +433,7 @@ paths: "https://platform.openai.com/docs/api-reference/assistants/listAssistants"> Equivalent to OpenAI's list assistants. responses: - "200": + '200': description: List of assistants retrieved successfully content: application/json: @@ -445,7 +473,7 @@ paths: source: | curl http://localhost:1337/v1/assistants \ -H "Content-Type: application/json" \ - "/assistants/{assistant_id}": + /assistants/{assistant_id}: get: operationId: getAssistant tags: @@ -465,18 +493,19 @@ paths: description: | The ID of the assistant to retrieve. responses: - "200": + '200': description: null content: application/json: schema: - $ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse + $ref: >- + specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse x-codeSamples: - lang: cURL source: | curl http://localhost:1337/v1/assistants/{assistant_id} \ -H "Content-Type: application/json" \ - "/threads/{thread_id}/messages": + /threads/{thread_id}/messages: get: operationId: listMessages tags: @@ -495,7 +524,7 @@ paths: description: | The ID of the thread from which to retrieve messages. responses: - "200": + '200': description: List of messages retrieved successfully content: application/json: @@ -547,7 +576,7 @@ paths: - role - content responses: - "200": + '200': description: Message created successfully content: application/json: @@ -562,7 +591,7 @@ paths: "role": "user", "content": "How does AI work? Explain it in simple terms." 
}' - "/threads/{thread_id}/messages/{message_id}": + /threads/{thread_id}/messages/{message_id}: get: operationId: retrieveMessage tags: @@ -589,7 +618,7 @@ paths: description: | The ID of the message to retrieve. responses: - "200": + '200': description: OK content: application/json: @@ -598,8 +627,8 @@ paths: x-codeSamples: - lang: cURL source: > - curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} - \ + curl + http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \ -H "Content-Type: application/json" x-webhooks: ModelObject: @@ -621,10 +650,9 @@ x-webhooks: post: summary: The assistant object description: > - Build assistants that can call models and use tools to perform - tasks. Equivalent - to OpenAI's assistants object. + Build assistants that can call models and use tools to perform tasks. + + Equivalent to OpenAI's assistants object. operationId: AssistantObjects tags: - Assistants @@ -651,7 +679,8 @@ x-webhooks: ThreadObject: post: summary: The thread object - description: Represents a thread that contains messages. - + Represents a thread that contains messages. Equivalent to OpenAI's thread object. operationId: ThreadObject From 16357178bf24b7a57166600d41ee6c2f6e6fc549 Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Tue, 27 Feb 2024 07:50:15 +0000 Subject: [PATCH 2/9] add docs for the model/model_id endpoints --- docs/openapi/jan.yaml | 110 +++++++++++++++++++++++++++++------------- 1 file changed, 77 insertions(+), 33 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index e58c6a4f2..e3f1ff3b1 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -113,38 +113,10 @@ paths: $ref: specs/models.yaml#/components/schemas/ListModelsResponse x-codeSamples: - lang: cURL - source: |- + source: | curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - - lang: JavaScript - source: |- - const response = await fetch('http://localhost:1337/v1/models', { - method: 'GET', - headers: {Accept: 'application/json'} - }); - const data = await response.json(); - - lang: Python - source: |- - import requests - - url = 'http://localhost:1337/v1/models' - headers = {'Accept': 'application/json'} - response = requests.get(url, headers=headers) - data = response.json() - - lang: Node.js - source: |- - const fetch = require('node-fetch'); - - const url = 'http://localhost:1337/v1/models'; - const options = { - method: 'GET', - headers: { Accept: 'application/json' } - }; - - fetch(url, options) - .then(res => res.json()) - .then(json => console.log(json)); /models/download/{model_id}: get: operationId: downloadModel @@ -204,10 +176,47 @@ paths: $ref: specs/models.yaml#/components/schemas/GetModelResponse x-codeSamples: - lang: cURL - source: | - curl -X 'GET' \ - 'http://localhost:1337/v1/models/{model_id}' \ + source: |- + curl -X 'GET' \ + 'http://localhost:1337/v1/models/{model_id}' \ -H 'accept: application/json' + - lang: JavaScript + source: |- + const fetch = require('node-fetch'); + + const modelId = 'mistral-ins-7b-q4'; + + fetch(`http://localhost:1337/v1/models/${modelId}`, { + method: 'GET', + headers: {'accept': 'application/json'} + }) + .then(res => res.json()) + .then(json => console.log(json)); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + const modelId = 'mistral-ins-7b-q4'; + + fetch(`http://localhost:1337/v1/models/${modelId}`, { + method: 'GET', + headers: {'accept': 'application/json'} + }) + .then(res => res.json()) + 
.then(json => console.log(json)); + - lang: Python + source: >- + import requests + + + model_id = 'mistral-ins-7b-q4' + + + response = + requests.get(f'http://localhost:1337/v1/models/{model_id}', + headers={'accept': 'application/json'}) + + print(response.json()) delete: operationId: deleteModel tags: @@ -235,10 +244,45 @@ paths: $ref: specs/models.yaml#/components/schemas/DeleteModelResponse x-codeSamples: - lang: cURL - source: | + source: |- curl -X 'DELETE' \ 'http://localhost:1337/v1/models/{model_id}' \ -H 'accept: application/json' + - lang: JavaScript + source: |- + const fetch = require('node-fetch'); + + const modelId = 'mistral-ins-7b-q4'; + + fetch(`http://localhost:1337/v1/models/${modelId}`, { + method: 'DELETE', + headers: { 'accept': 'application/json' } + }) + .then(res => res.json()) + .then(json => console.log(json)); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + const modelId = 'mistral-ins-7b-q4'; + + fetch(`http://localhost:1337/v1/models/${modelId}`, { + method: 'DELETE', + headers: { 'accept': 'application/json' } + }) + .then(res => res.json()) + .then(json => console.log(json)); + - lang: Python + source: >- + import requests + + + model_id = 'mistral-ins-7b-q4' + + + response = + requests.delete(f'http://localhost:1337/v1/models/{model_id}', + headers={'accept': 'application/json'}) /threads: post: operationId: createThread From a6dbcf3a1bcf19ed5a2c7b8fb15fcc19bd02d187 Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Tue, 27 Feb 2024 07:58:22 +0000 Subject: [PATCH 3/9] adds code snippets for download --- docs/openapi/jan.yaml | 66 ++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index e58c6a4f2..3c2abcc0f 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -113,38 +113,10 @@ paths: $ref: specs/models.yaml#/components/schemas/ListModelsResponse x-codeSamples: - lang: cURL - source: |- + source: | curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - - lang: JavaScript - source: |- - const response = await fetch('http://localhost:1337/v1/models', { - method: 'GET', - headers: {Accept: 'application/json'} - }); - const data = await response.json(); - - lang: Python - source: |- - import requests - - url = 'http://localhost:1337/v1/models' - headers = {'Accept': 'application/json'} - response = requests.get(url, headers=headers) - data = response.json() - - lang: Node.js - source: |- - const fetch = require('node-fetch'); - - const url = 'http://localhost:1337/v1/models'; - const options = { - method: 'GET', - headers: { Accept: 'application/json' } - }; - - fetch(url, options) - .then(res => res.json()) - .then(json => console.log(json)); /models/download/{model_id}: get: operationId: downloadModel @@ -171,10 +143,40 @@ paths: $ref: specs/models.yaml#/components/schemas/DownloadModelResponse x-codeSamples: - lang: cURL - source: | - curl -X 'GET' \ - 'http://localhost:1337/v1/models/download/{model_id}' \ + source: |- + curl -X 'GET' \ + 'http://localhost:1337/v1/models/download/{model_id}' \ -H 'accept: application/json' + - lang: JavaScript + source: >- + const response = await + fetch('http://localhost:1337/v1/models/download/{model_id}', { + method: 'GET', + headers: {accept: 'application/json'} + }); + + + const data = await response.json(); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + 
fetch('http://localhost:1337/v1/models/download/{model_id}', { + method: 'GET', + headers: {accept: 'application/json'} + }) + .then(res => res.json()) + .then(data => console.log(data)); + - lang: Python + source: >- + import requests + + + response = + requests.get('http://localhost:1337/v1/models/download/{model_id}', + headers={'accept': 'application/json'}) + + data = response.json() /models/{model_id}: get: operationId: retrieveModel From 90a11ea4c4867e6c83afea94ee2f46777f7c8103 Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Tue, 27 Feb 2024 08:02:23 +0000 Subject: [PATCH 4/9] adds code snippets for chat completeions --- docs/openapi/jan.yaml | 134 +++++++++++++++++++++++++++++++++--------- 1 file changed, 105 insertions(+), 29 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index e58c6a4f2..cf5db102a 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -93,6 +93,110 @@ paths: "temperature": 0.7, "top_p": 0.95 }' + - lang: JavaScript + source: |- + const data = { + messages: [ + { + content: 'You are a helpful assistant.', + role: 'system' + }, + { + content: 'Hello!', + role: 'user' + } + ], + model: 'tinyllama-1.1b', + stream: true, + max_tokens: 2048, + stop: ['hello'], + frequency_penalty: 0, + presence_penalty: 0, + temperature: 0.7, + top_p: 0.95 + }; + + fetch('http://localhost:1337/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Accept': 'application/json' + }, + body: JSON.stringify(data) + }) + .then(response => response.json()) + .then(data => console.log(data)); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + const data = { + messages: [ + { + content: 'You are a helpful assistant.', + role: 'system' + }, + { + content: 'Hello!', + role: 'user' + } + ], + model: 'tinyllama-1.1b', + stream: true, + max_tokens: 2048, + stop: ['hello'], + frequency_penalty: 0, + presence_penalty: 0, + temperature: 0.7, + top_p: 0.95 + }; + + fetch('http://localhost:1337/v1/chat/completions', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Accept': 'application/json' + }, + body: JSON.stringify(data) + }) + .then(response => response.json()) + .then(data => console.log(data)); + - lang: Python + source: >- + import requests + + import json + + + data = { + "messages": [ + { + "content": "You are a helpful assistant.", + "role": "system" + }, + { + "content": "Hello!", + "role": "user" + } + ], + "model": "tinyllama-1.1b", + "stream": true, + "max_tokens": 2048, + "stop": [ + "hello" + ], + "frequency_penalty": 0, + "presence_penalty": 0, + "temperature": 0.7, + "top_p": 0.95 + } + + + response = + requests.post('http://localhost:1337/v1/chat/completions', + json=data) + + print(response.json()) /models: get: operationId: listModels @@ -113,38 +217,10 @@ paths: $ref: specs/models.yaml#/components/schemas/ListModelsResponse x-codeSamples: - lang: cURL - source: |- + source: | curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - - lang: JavaScript - source: |- - const response = await fetch('http://localhost:1337/v1/models', { - method: 'GET', - headers: {Accept: 'application/json'} - }); - const data = await response.json(); - - lang: Python - source: |- - import requests - - url = 'http://localhost:1337/v1/models' - headers = {'Accept': 'application/json'} - response = requests.get(url, headers=headers) - data = response.json() - - lang: Node.js - source: |- - const fetch = 
require('node-fetch'); - - const url = 'http://localhost:1337/v1/models'; - const options = { - method: 'GET', - headers: { Accept: 'application/json' } - }; - - fetch(url, options) - .then(res => res.json()) - .then(json => console.log(json)); /models/download/{model_id}: get: operationId: downloadModel From 0309d74ccd1f2e1fae1c8491823d904f1b64ea53 Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Tue, 27 Feb 2024 22:38:22 +0000 Subject: [PATCH 5/9] adds code snippets for threads --- docs/openapi/jan.yaml | 193 +++++++++++++++++++++++++++++++----------- 1 file changed, 144 insertions(+), 49 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index 864c80fdf..ed80faf4f 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -1,11 +1,11 @@ ---- openapi: 3.0.0 info: title: API Reference description: > # Introduction - Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference). + Jan API is compatible with the [OpenAI + API](https://platform.openai.com/docs/api-reference). version: 0.1.8 contact: name: Jan Discord @@ -20,12 +20,12 @@ tags: description: List and describe the various models available in the API. - name: Chat description: > - Given a list of messages comprising a conversation, the model will - return a response. + Given a list of messages comprising a conversation, the model will return + a response. - name: Messages description: > - Messages capture a conversation's content. This can include the - content from LLM responses and other metadata from [chat + Messages capture a conversation's content. This can include the content + from LLM responses and other metadata from [chat completions](/specs/chats). - name: Threads - name: Assistants @@ -49,16 +49,16 @@ paths: summary: | Create chat completion description: > - Creates a model response for the given chat conversation. - Equivalent to OpenAI's create chat completion. + Creates a model response for the given chat conversation. Equivalent + to OpenAI's create chat completion. requestBody: content: application/json: schema: $ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest responses: - "200": + '200': description: OK content: application/json: @@ -100,12 +100,12 @@ paths: - Models summary: List models description: > - Lists the currently available models, and provides basic - information about each one such as the owner and availability. - Equivalent to OpenAI's list model. + Lists the currently available models, and provides basic information + about each one such as the owner and availability. Equivalent + to OpenAI's list model. responses: - "200": + '200': description: OK content: application/json: @@ -117,7 +117,7 @@ paths: curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - "/models/download/{model_id}": + /models/download/{model_id}: get: operationId: downloadModel tags: @@ -135,7 +135,7 @@ paths: description: | The ID of the model to use for this request. responses: - "200": + '200': description: OK content: application/json: @@ -147,15 +147,15 @@ paths: curl -X 'GET' \ 'http://localhost:1337/v1/models/download/{model_id}' \ -H 'accept: application/json' - "/models/{model_id}": + /models/{model_id}: get: operationId: retrieveModel tags: - Models summary: Retrieve model description: > - Get a model instance, providing basic information about the model - such as the owner and permissioning. Equivalent to OpenAI's retrieve model. 
parameters: @@ -168,7 +168,7 @@ paths: description: | The ID of the model to use for this request. responses: - "200": + '200': description: OK content: application/json: @@ -199,7 +199,7 @@ paths: description: | The model id to delete responses: - "200": + '200': description: OK content: application/json: @@ -228,7 +228,7 @@ paths: schema: $ref: specs/threads.yaml#/components/schemas/CreateThreadObject responses: - "200": + '200': description: Thread created successfully content: application/json: @@ -237,8 +237,8 @@ paths: x-codeSamples: - lang: cURL source: | - curl -X POST http://localhost:1337/v1/threads \ - -H "Content-Type: application/json" \ + curl -X POST http://localhost:1337/v1/threads \ + -H "Content-Type: application/json" \ -d '{ "messages": [{ "role": "user", @@ -249,6 +249,73 @@ paths: "content": "How does AI work? Explain it in simple terms." }] }' + - lang: JavaScript + source: |- + const fetch = require('node-fetch'); + + fetch('http://localhost:1337/v1/threads', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + messages: [ + { + role: 'user', + content: 'Hello, what is AI?', + file_ids: ['file-abc123'] + }, + { + role: 'user', + content: 'How does AI work? Explain it in simple terms.' + } + ] + }) + }); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + fetch('http://localhost:1337/v1/threads', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + messages: [ + { + role: 'user', + content: 'Hello, what is AI?', + file_ids: ['file-abc123'] + }, + { + role: 'user', + content: 'How does AI work? Explain it in simple terms.' + } + ] + }) + }); + - lang: Python + source: |- + import requests + + url = 'http://localhost:1337/v1/threads' + payload = { + 'messages': [ + { + 'role': 'user', + 'content': 'Hello, what is AI?', + 'file_ids': ['file-abc123'] + }, + { + 'role': 'user', + 'content': 'How does AI work? Explain it in simple terms.' + } + ] + } + + response = requests.post(url, json=payload) + print(response.text) get: operationId: listThreads tags: @@ -257,7 +324,7 @@ paths: description: | Retrieves a list of all threads available in the system. responses: - "200": + '200': description: List of threads retrieved successfully content: application/json: @@ -282,10 +349,37 @@ paths: metadata: {} x-codeSamples: - lang: cURL - source: | - curl http://localhost:1337/v1/threads \ - -H "Content-Type: application/json" \ - "/threads/{thread_id}": + source: |- + curl http://localhost:1337/v1/threads \ + -H "Content-Type: application/json" + - lang: JavaScript + source: |- + const fetch = require('node-fetch'); + + fetch('http://localhost:1337/v1/threads', { + method: 'GET', + headers: {'Content-Type': 'application/json'} + }).then(res => res.json()) + .then(json => console.log(json)); + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + fetch('http://localhost:1337/v1/threads', { + method: 'GET', + headers: {'Content-Type': 'application/json'} + }).then(res => res.json()) + .then(json => console.log(json)); + - lang: Python + source: |- + import requests + + url = 'http://localhost:1337/v1/threads' + headers = {'Content-Type': 'application/json'} + + response = requests.get(url, headers=headers) + print(response.json()) + /threads/{thread_id}: get: operationId: getThread tags: @@ -305,7 +399,7 @@ paths: description: | The ID of the thread to retrieve. 
responses: - "200": + '200': description: Thread details retrieved successfully content: application/json: @@ -345,7 +439,7 @@ paths: items: $ref: specs/threads.yaml#/components/schemas/ThreadMessageObject responses: - "200": + '200': description: Thread modified successfully content: application/json: @@ -384,7 +478,7 @@ paths: description: | The ID of the thread to be deleted. responses: - "200": + '200': description: Thread deleted successfully content: application/json: @@ -405,7 +499,7 @@ paths: "https://platform.openai.com/docs/api-reference/assistants/listAssistants"> Equivalent to OpenAI's list assistants. responses: - "200": + '200': description: List of assistants retrieved successfully content: application/json: @@ -445,7 +539,7 @@ paths: source: | curl http://localhost:1337/v1/assistants \ -H "Content-Type: application/json" \ - "/assistants/{assistant_id}": + /assistants/{assistant_id}: get: operationId: getAssistant tags: @@ -465,18 +559,19 @@ paths: description: | The ID of the assistant to retrieve. responses: - "200": + '200': description: null content: application/json: schema: - $ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse + $ref: >- + specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse x-codeSamples: - lang: cURL source: | curl http://localhost:1337/v1/assistants/{assistant_id} \ -H "Content-Type: application/json" \ - "/threads/{thread_id}/messages": + /threads/{thread_id}/messages: get: operationId: listMessages tags: @@ -495,7 +590,7 @@ paths: description: | The ID of the thread from which to retrieve messages. responses: - "200": + '200': description: List of messages retrieved successfully content: application/json: @@ -547,7 +642,7 @@ paths: - role - content responses: - "200": + '200': description: Message created successfully content: application/json: @@ -562,7 +657,7 @@ paths: "role": "user", "content": "How does AI work? Explain it in simple terms." }' - "/threads/{thread_id}/messages/{message_id}": + /threads/{thread_id}/messages/{message_id}: get: operationId: retrieveMessage tags: @@ -589,7 +684,7 @@ paths: description: | The ID of the message to retrieve. responses: - "200": + '200': description: OK content: application/json: @@ -598,8 +693,8 @@ paths: x-codeSamples: - lang: cURL source: > - curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} - \ + curl + http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \ -H "Content-Type: application/json" x-webhooks: ModelObject: @@ -621,10 +716,9 @@ x-webhooks: post: summary: The assistant object description: > - Build assistants that can call models and use tools to perform - tasks. Equivalent - to OpenAI's assistants object. + Build assistants that can call models and use tools to perform tasks. + + Equivalent to OpenAI's assistants object. operationId: AssistantObjects tags: - Assistants @@ -651,7 +745,8 @@ x-webhooks: ThreadObject: post: summary: The thread object - description: Represents a thread that contains messages. - + Represents a thread that contains messages. Equivalent to OpenAI's thread object. 
operationId: ThreadObject From 08c4540f6ad5610228f2eec31aec2ecfeb7e8c2f Mon Sep 17 00:00:00 2001 From: avb-is-me <104213687+avb-is-me@users.noreply.github.com> Date: Wed, 28 Feb 2024 16:23:18 +0000 Subject: [PATCH 6/9] adds updates to assistant --- docs/openapi/jan.yaml | 119 ++++++++++++++++++++++++++---------------- 1 file changed, 73 insertions(+), 46 deletions(-) diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index 864c80fdf..76906acf3 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -1,11 +1,11 @@ ---- openapi: 3.0.0 info: title: API Reference description: > # Introduction - Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference). + Jan API is compatible with the [OpenAI + API](https://platform.openai.com/docs/api-reference). version: 0.1.8 contact: name: Jan Discord @@ -20,12 +20,12 @@ tags: description: List and describe the various models available in the API. - name: Chat description: > - Given a list of messages comprising a conversation, the model will - return a response. + Given a list of messages comprising a conversation, the model will return + a response. - name: Messages description: > - Messages capture a conversation's content. This can include the - content from LLM responses and other metadata from [chat + Messages capture a conversation's content. This can include the content + from LLM responses and other metadata from [chat completions](/specs/chats). - name: Threads - name: Assistants @@ -49,16 +49,16 @@ paths: summary: | Create chat completion description: > - Creates a model response for the given chat conversation. - Equivalent to OpenAI's create chat completion. + Creates a model response for the given chat conversation. Equivalent + to OpenAI's create chat completion. requestBody: content: application/json: schema: $ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest responses: - "200": + '200': description: OK content: application/json: @@ -100,12 +100,12 @@ paths: - Models summary: List models description: > - Lists the currently available models, and provides basic - information about each one such as the owner and availability. - Equivalent to OpenAI's list model. + Lists the currently available models, and provides basic information + about each one such as the owner and availability. Equivalent + to OpenAI's list model. responses: - "200": + '200': description: OK content: application/json: @@ -117,7 +117,7 @@ paths: curl -X 'GET' \ 'http://localhost:1337/v1/models' \ -H 'accept: application/json' - "/models/download/{model_id}": + /models/download/{model_id}: get: operationId: downloadModel tags: @@ -135,7 +135,7 @@ paths: description: | The ID of the model to use for this request. responses: - "200": + '200': description: OK content: application/json: @@ -147,15 +147,15 @@ paths: curl -X 'GET' \ 'http://localhost:1337/v1/models/download/{model_id}' \ -H 'accept: application/json' - "/models/{model_id}": + /models/{model_id}: get: operationId: retrieveModel tags: - Models summary: Retrieve model description: > - Get a model instance, providing basic information about the model - such as the owner and permissioning. Equivalent to OpenAI's retrieve model. parameters: @@ -168,7 +168,7 @@ paths: description: | The ID of the model to use for this request. 
responses: - "200": + '200': description: OK content: application/json: @@ -199,7 +199,7 @@ paths: description: | The model id to delete responses: - "200": + '200': description: OK content: application/json: @@ -228,7 +228,7 @@ paths: schema: $ref: specs/threads.yaml#/components/schemas/CreateThreadObject responses: - "200": + '200': description: Thread created successfully content: application/json: @@ -257,7 +257,7 @@ paths: description: | Retrieves a list of all threads available in the system. responses: - "200": + '200': description: List of threads retrieved successfully content: application/json: @@ -285,7 +285,7 @@ paths: source: | curl http://localhost:1337/v1/threads \ -H "Content-Type: application/json" \ - "/threads/{thread_id}": + /threads/{thread_id}: get: operationId: getThread tags: @@ -305,7 +305,7 @@ paths: description: | The ID of the thread to retrieve. responses: - "200": + '200': description: Thread details retrieved successfully content: application/json: @@ -345,7 +345,7 @@ paths: items: $ref: specs/threads.yaml#/components/schemas/ThreadMessageObject responses: - "200": + '200': description: Thread modified successfully content: application/json: @@ -384,7 +384,7 @@ paths: description: | The ID of the thread to be deleted. responses: - "200": + '200': description: Thread deleted successfully content: application/json: @@ -405,7 +405,7 @@ paths: "https://platform.openai.com/docs/api-reference/assistants/listAssistants"> Equivalent to OpenAI's list assistants. responses: - "200": + '200': description: List of assistants retrieved successfully content: application/json: @@ -442,10 +442,36 @@ paths: metadata: {} x-codeSamples: - lang: cURL - source: | + source: |- curl http://localhost:1337/v1/assistants \ - -H "Content-Type: application/json" \ - "/assistants/{assistant_id}": + -H "Content-Type: application/json" + - lang: JavaScript + source: |- + fetch('http://localhost:1337/v1/assistants', { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }) + - lang: Node.js + source: |- + const fetch = require('node-fetch'); + + fetch('http://localhost:1337/v1/assistants', { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } + }) + - lang: Python + source: |- + import requests + + url = 'http://localhost:1337/v1/assistants' + headers = {'Content-Type': 'application/json'} + + response = requests.get(url, headers=headers) + /assistants/{assistant_id}: get: operationId: getAssistant tags: @@ -465,18 +491,19 @@ paths: description: | The ID of the assistant to retrieve. responses: - "200": + '200': description: null content: application/json: schema: - $ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse + $ref: >- + specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse x-codeSamples: - lang: cURL source: | curl http://localhost:1337/v1/assistants/{assistant_id} \ -H "Content-Type: application/json" \ - "/threads/{thread_id}/messages": + /threads/{thread_id}/messages: get: operationId: listMessages tags: @@ -495,7 +522,7 @@ paths: description: | The ID of the thread from which to retrieve messages. responses: - "200": + '200': description: List of messages retrieved successfully content: application/json: @@ -547,7 +574,7 @@ paths: - role - content responses: - "200": + '200': description: Message created successfully content: application/json: @@ -562,7 +589,7 @@ paths: "role": "user", "content": "How does AI work? Explain it in simple terms." 
}' - "/threads/{thread_id}/messages/{message_id}": + /threads/{thread_id}/messages/{message_id}: get: operationId: retrieveMessage tags: @@ -589,7 +616,7 @@ paths: description: | The ID of the message to retrieve. responses: - "200": + '200': description: OK content: application/json: @@ -598,8 +625,8 @@ paths: x-codeSamples: - lang: cURL source: > - curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} - \ + curl + http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \ -H "Content-Type: application/json" x-webhooks: ModelObject: @@ -621,10 +648,9 @@ x-webhooks: post: summary: The assistant object description: > - Build assistants that can call models and use tools to perform - tasks. Equivalent - to OpenAI's assistants object. + Build assistants that can call models and use tools to perform tasks. + + Equivalent to OpenAI's assistants object. operationId: AssistantObjects tags: - Assistants @@ -651,7 +677,8 @@ x-webhooks: ThreadObject: post: summary: The thread object - description: Represents a thread that contains messages. - + Represents a thread that contains messages. Equivalent to OpenAI's thread object. operationId: ThreadObject From 44d9f5b7b91105a45764bd7da8a4ed86ff313ce1 Mon Sep 17 00:00:00 2001 From: Faisal Amir Date: Thu, 29 Feb 2024 12:13:37 +0700 Subject: [PATCH 7/9] fix: minor ui missing secondary background (#2198) --- uikit/src/main.scss | 65 +++------------------------------------------ 1 file changed, 3 insertions(+), 62 deletions(-) diff --git a/uikit/src/main.scss b/uikit/src/main.scss index f3294e12e..e31b53c68 100644 --- a/uikit/src/main.scss +++ b/uikit/src/main.scss @@ -42,69 +42,10 @@ --danger: 346.8 77.2% 49.8%; --danger-foreground: 355.7 100% 97.3%; + --secondary: 60 4.8% 95.9%; + --secondary-foreground: 24 9.8% 10%; + --border: 20 5.9% 90%; --input: 20 5.9% 90%; --ring: 20 14.3% 4.1%; - - .primary-blue { - --primary: 221 83% 53%; - --primary-foreground: 210 40% 98%; - - --secondary: 60 4.8% 95.9%; - --secondary-foreground: 24 9.8% 10%; - } - - .primary-green { - --primary: 142.1 76.2% 36.3%; - --primary-foreground: 355.7 100% 97.3%; - - --secondary: 240 4.8% 95.9%; - --secondary-foreground: 240 5.9% 10%; - } - - .primary-purple { - --primary: 262.1 83.3% 57.8%; - --primary-foreground: 210 20% 98%; - - --secondary: 220 14.3% 95.9%; - --secondary-foreground: 220.9 39.3% 11%; - } -} - -.dark { - --background: 20 14.3% 4.1%; - --foreground: 60 9.1% 97.8%; - - --muted: 12 6.5% 15.1%; - --muted-foreground: 24 5.4% 63.9%; - - --danger: 346.8 77.2% 49.8%; - --danger-foreground: 355.7 100% 97.3%; - - --border: 12 6.5% 15.1%; - --input: 12 6.5% 15.1%; - --ring: 35.5 91.7% 32.9%; - - .primary-blue { - --primary: 221 83% 53%; - --primary-foreground: 222.2 47.4% 11.2%; - - --secondary: 12 6.5% 15.1%; - --secondary-foreground: 60 9.1% 97.8%; - } - - .primary-green { - --primary: 142.1 70.6% 45.3%; - --primary-foreground: 144.9 80.4% 10%; - --secondary: 240 3.7% 15.9%; - --secondary-foreground: 0 0% 98%; - } - - .primary-purple { - --primary: 263.4 70% 50.4%; - --primary-foreground: 210 20% 98%; - - --secondary: 215 27.9% 16.9%; - --secondary-foreground: 210 20% 98%; - } } From 5c185d2740a8baa91cc732f0822b9ca31b2b95e6 Mon Sep 17 00:00:00 2001 From: NamH Date: Thu, 29 Feb 2024 14:35:05 +0700 Subject: [PATCH 8/9] fix: download model error does not reset state in model hub (#2199) Signed-off-by: James Co-authored-by: James --- core/src/node/api/processors/download.ts | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git 
a/core/src/node/api/processors/download.ts b/core/src/node/api/processors/download.ts index bff6f47f0..4ddeff160 100644 --- a/core/src/node/api/processors/download.ts +++ b/core/src/node/api/processors/download.ts @@ -42,6 +42,24 @@ export class Downloader implements Processor { // Downloading file to a temp file first const downloadingTempFile = `${destination}.download` + // adding initial download state + const initialDownloadState: DownloadState = { + modelId, + fileName, + time: { + elapsed: 0, + remaining: 0, + }, + speed: 0, + percent: 0, + size: { + total: 0, + transferred: 0, + }, + downloadState: 'downloading', + } + DownloadManager.instance.downloadProgressMap[modelId] = initialDownloadState + progress(rq, {}) .on('progress', (state: any) => { const downloadState: DownloadState = { From 8e12e3a4a3bb9cb07dede6bef348fd4d49e83444 Mon Sep 17 00:00:00 2001 From: Service Account Date: Fri, 1 Mar 2024 01:24:03 +0000 Subject: [PATCH 9/9] janhq/jan: Update README.md with nightly build artifact URL --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index bc5918cb9..035126faa 100644 --- a/README.md +++ b/README.md @@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute Experimental (Nightly Build) - + jan.exe - + Intel - + M1/M2 - + jan.deb - + jan.AppImage
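
Closing note: patches 1-6 in this series add per-endpoint request samples to docs/openapi/jan.yaml. As a rough illustration of how those documented endpoints compose, the sketch below chains the two calls described above — GET /v1/models and POST /v1/chat/completions on http://localhost:1337 — into a single Node.js script. It is a minimal sketch, not part of the patch series: it assumes Node 18+ (global fetch) and a locally installed model, and the model id tinyllama-1.1b, the non-streaming flag, and the sample messages are illustrative values modeled on the samples above rather than requirements of the API.

const BASE_URL = 'http://localhost:1337/v1'; // assumed local Jan server, as in the samples above

async function main() {
  // 1. List the currently available models (GET /v1/models).
  const modelsRes = await fetch(`${BASE_URL}/models`, {
    headers: { Accept: 'application/json' },
  });
  const models = await modelsRes.json();
  console.log('Available models:', models);

  // 2. Send a chat completion request (POST /v1/chat/completions).
  //    The model id below is an illustrative assumption; in practice, use one returned by step 1.
  const chatRes = await fetch(`${BASE_URL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Accept: 'application/json',
    },
    body: JSON.stringify({
      model: 'tinyllama-1.1b',
      stream: false, // ask for a single JSON response instead of a stream, for simplicity
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'Hello!' },
      ],
    }),
  });
  console.log(await chatRes.json());
}

main().catch(console.error);

The stream: false flag is only a convenience assumption for this sketch; the cURL samples in the patches above show the streaming form of the same request.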