diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml
index 633c9abc6..f45db7d2d 100644
--- a/docs/openapi/jan.yaml
+++ b/docs/openapi/jan.yaml
@@ -1,11 +1,11 @@
+---
openapi: 3.0.0
info:
title: API Reference
description: >
# Introduction
- Jan API is compatible with the [OpenAI
- API](https://platform.openai.com/docs/api-reference).
+ Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference).
version: 0.1.8
contact:
name: Jan Discord
@@ -20,12 +20,12 @@ tags:
description: List and describe the various models available in the API.
- name: Chat
description: >
- Given a list of messages comprising a conversation, the model will return
- a response.
+ Given a list of messages comprising a conversation, the model will
+ return a response.
- name: Messages
description: >
- Messages capture a conversation's content. This can include the content
- from LLM responses and other metadata from [chat
+ Messages capture a conversation's content. This can include the
+ content from LLM responses and other metadata from [chat
completions](/specs/chats).
- name: Threads
- name: Assistants
@@ -49,16 +49,16 @@ paths:
summary: |
Create chat completion
description: >
- Creates a model response for the given chat conversation. Equivalent
- to OpenAI's create chat completion.
+ Creates a model response for the given chat conversation.
+ Equivalent to OpenAI's create chat completion.
requestBody:
content:
application/json:
schema:
$ref: specs/chat.yaml#/components/schemas/ChatCompletionRequest
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -192,9 +192,7 @@ paths:
}
- response =
- requests.post('http://localhost:1337/v1/chat/completions',
- json=data)
+ response = requests.post('http://localhost:1337/v1/chat/completions', json=data)
print(response.json())
/models:
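For reference, a complete version of the chat-completions call whose tail appears in the Python sample above; the model id and message below are illustrative placeholders, not values taken from the spec.

import requests

# Minimal chat-completion request against a local Jan server.
# 'mistral-ins-7b-q4' appears elsewhere in these samples; any installed model id works.
data = {
    'model': 'mistral-ins-7b-q4',
    'messages': [
        {'role': 'user', 'content': 'Hello!'}
    ]
}

response = requests.post('http://localhost:1337/v1/chat/completions', json=data)
print(response.json())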
@@ -204,12 +202,12 @@ paths:
- Models
summary: List models
description: >
- Lists the currently available models, and provides basic information
- about each one such as the owner and availability. Equivalent
- to OpenAI's list model.
+ Lists the currently available models, and provides basic
+ information about each one such as the owner and availability.
+ Equivalent to OpenAI's list model.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -228,14 +226,6 @@ paths:
headers: {Accept: 'application/json'}
});
const data = await response.json();
- - lang: Python
- source: |-
- import requests
-
- url = 'http://localhost:1337/v1/models'
- headers = {'Accept': 'application/json'}
- response = requests.get(url, headers=headers)
- data = response.json()
- lang: Node.js
source: |-
const fetch = require('node-fetch');
@@ -249,7 +239,15 @@ paths:
fetch(url, options)
.then(res => res.json())
.then(json => console.log(json));
- /models/download/{model_id}:
+ - lang: Python
+ source: |-
+ import requests
+
+ url = 'http://localhost:1337/v1/models'
+ headers = {'Accept': 'application/json'}
+ response = requests.get(url, headers=headers)
+ data = response.json()
+ "/models/download/{model_id}":
get:
operationId: downloadModel
tags:
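A short sketch of the downloadModel call with a concrete id substituted for the {model_id} path parameter; the id used here is illustrative.

import requests

# Trigger a model download; replace the id with the model you want to pull.
model_id = 'mistral-ins-7b-q4'

response = requests.get(
    f'http://localhost:1337/v1/models/download/{model_id}',
    headers={'accept': 'application/json'}
)
print(response.json())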
@@ -267,7 +265,7 @@ paths:
description: |
The ID of the model to use for this request.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -304,20 +302,18 @@ paths:
import requests
- response =
- requests.get('http://localhost:1337/v1/models/download/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.get('http://localhost:1337/v1/models/download/{model_id}', headers={'accept': 'application/json'})
data = response.json()
- /models/{model_id}:
+ "/models/{model_id}":
get:
operationId: retrieveModel
tags:
- Models
summary: Retrieve model
description: >
-        Get a model instance, providing basic information about the model such
-        as the owner and permissioning.
+        Get a model instance, providing basic information about the model
+        such as the owner and permissioning.
Equivalent to OpenAI's retrieve model.
parameters:
@@ -330,7 +326,7 @@ paths:
description: |
The ID of the model to use for this request.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -374,9 +370,7 @@ paths:
model_id = 'mistral-ins-7b-q4'
- response =
- requests.get(f'http://localhost:1337/v1/models/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.get(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
print(response.json())
delete:
@@ -398,7 +392,7 @@ paths:
description: |
The model id to delete
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -442,9 +436,7 @@ paths:
model_id = 'mistral-ins-7b-q4'
- response =
- requests.delete(f'http://localhost:1337/v1/models/{model_id}',
- headers={'accept': 'application/json'})
+ response = requests.delete(f'http://localhost:1337/v1/models/{model_id}', headers={'accept': 'application/json'})
/threads:
post:
operationId: createThread
@@ -462,7 +454,7 @@ paths:
schema:
$ref: specs/threads.yaml#/components/schemas/CreateThreadObject
responses:
- '200':
+ "200":
description: Thread created successfully
content:
application/json:
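A minimal sketch of the createThread call; the empty JSON body is an assumption, and the accepted fields are defined by CreateThreadObject in specs/threads.yaml.

import requests

# Create a new thread. An empty body is assumed here; see
# specs/threads.yaml#/components/schemas/CreateThreadObject for the accepted fields.
response = requests.post(
    'http://localhost:1337/v1/threads',
    headers={'Content-Type': 'application/json'},
    json={}
)
print(response.json())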
@@ -558,7 +550,7 @@ paths:
description: |
Retrieves a list of all threads available in the system.
responses:
- '200':
+ "200":
description: List of threads retrieved successfully
content:
application/json:
@@ -613,7 +605,7 @@ paths:
response = requests.get(url, headers=headers)
print(response.json())
- /threads/{thread_id}:
+ "/threads/{thread_id}":
get:
operationId: getThread
tags:
@@ -633,7 +625,7 @@ paths:
description: |
The ID of the thread to retrieve.
responses:
- '200':
+ "200":
description: Thread details retrieved successfully
content:
application/json:
@@ -673,7 +665,7 @@ paths:
items:
$ref: specs/threads.yaml#/components/schemas/ThreadMessageObject
responses:
- '200':
+ "200":
description: Thread modified successfully
content:
application/json:
@@ -712,7 +704,7 @@ paths:
description: |
The ID of the thread to be deleted.
responses:
- '200':
+ "200":
description: Thread deleted successfully
content:
application/json:
@@ -733,7 +725,7 @@ paths:
"https://platform.openai.com/docs/api-reference/assistants/listAssistants">
Equivalent to OpenAI's list assistants.
responses:
- '200':
+ "200":
description: List of assistants retrieved successfully
content:
application/json:
@@ -799,7 +791,7 @@ paths:
headers = {'Content-Type': 'application/json'}
response = requests.get(url, headers=headers)
- /assistants/{assistant_id}:
+ "/assistants/{assistant_id}":
get:
operationId: getAssistant
tags:
@@ -819,19 +811,51 @@ paths:
description: |
The ID of the assistant to retrieve.
responses:
- '200':
+ "200":
description: null
content:
application/json:
schema:
- $ref: >-
- specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
+ $ref: specs/assistants.yaml#/components/schemas/RetrieveAssistantResponse
x-codeSamples:
- lang: cURL
- source: |
+ source: |-
curl http://localhost:1337/v1/assistants/{assistant_id} \
- -H "Content-Type: application/json" \
- /threads/{thread_id}/messages:
+ -H "Content-Type: application/json"
+ - lang: JavaScript
+ source: |-
+ const fetch = require('node-fetch');
+
+ let assistantId = 'abc123';
+
+ fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Node.js
+ source: |-
+ const fetch = require('node-fetch');
+
+ let assistantId = 'abc123';
+
+ fetch(`http://localhost:1337/v1/assistants/${assistantId}`, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json'
+ }
+ })
+ - lang: Python
+ source: >-
+ import requests
+
+
+ assistant_id = 'abc123'
+
+
+ response = requests.get(f'http://localhost:1337/v1/assistants/{assistant_id}', headers={'Content-Type': 'application/json'})
+ "/threads/{thread_id}/messages":
get:
operationId: listMessages
tags:
@@ -850,7 +874,7 @@ paths:
description: |
The ID of the thread from which to retrieve messages.
responses:
- '200':
+ "200":
description: List of messages retrieved successfully
content:
application/json:
@@ -902,7 +926,7 @@ paths:
- role
- content
responses:
- '200':
+ "200":
description: Message created successfully
content:
application/json:
@@ -917,7 +941,7 @@ paths:
"role": "user",
"content": "How does AI work? Explain it in simple terms."
}'
- /threads/{thread_id}/messages/{message_id}:
+ "/threads/{thread_id}/messages/{message_id}":
get:
operationId: retrieveMessage
tags:
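The createMessage request shown in the cURL sample above, sketched in Python; the thread id is a placeholder.

import requests

# Post a user message to an existing thread; 'thread_123' is a placeholder id.
thread_id = 'thread_123'

response = requests.post(
    f'http://localhost:1337/v1/threads/{thread_id}/messages',
    headers={'Content-Type': 'application/json'},
    json={
        'role': 'user',
        'content': 'How does AI work? Explain it in simple terms.'
    }
)
print(response.json())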
@@ -944,7 +968,7 @@ paths:
description: |
The ID of the message to retrieve.
responses:
- '200':
+ "200":
description: OK
content:
application/json:
@@ -953,8 +977,8 @@ paths:
x-codeSamples:
- lang: cURL
source: >
- curl
- http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}
+ \
-H "Content-Type: application/json"
x-webhooks:
ModelObject:
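And the retrieveMessage call from the cURL sample above as a Python sketch; both ids are placeholders.

import requests

# Retrieve a single message from a thread; both ids are placeholders.
thread_id = 'thread_123'
message_id = 'msg_123'

response = requests.get(
    f'http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}',
    headers={'Content-Type': 'application/json'}
)
print(response.json())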
@@ -976,9 +1000,10 @@ x-webhooks:
post:
summary: The assistant object
description: >
- Build assistants that can call models and use tools to perform tasks.
-
- Equivalent to OpenAI's assistants object.
+        Build assistants that can call models and use tools to perform
+        tasks.
+
+        Equivalent to OpenAI's assistants object.
operationId: AssistantObjects
tags:
- Assistants
@@ -1005,8 +1030,7 @@ x-webhooks:
ThreadObject:
post:
summary: The thread object
-      description: >-
-        Represents a thread that contains messages.
+      description: >-
+        Represents a thread that contains messages.
Equivalent to OpenAI's thread object.
operationId: ThreadObject