diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 822c2189b..7b07016d2 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -113,11 +113,11 @@ const config = {
primaryColor: "#1a73e8",
primaryColorDark: "#1a73e8",
options: {
- disableSearch: true,
requiredPropsFirst: true,
- noAutoAuth: true
+ noAutoAuth: true,
+ hideDownloadButton: true,
+ disableSearch: true,
},
- // redocOptions: { hideDownloadButton: false },
},
},
],
diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml
index c1bd48143..43c07cb3c 100644
--- a/docs/openapi/jan.yaml
+++ b/docs/openapi/jan.yaml
@@ -1,7 +1,9 @@
openapi: 3.0.0
info:
- title: Jan API Reference
- description: Please see https://jan.ai for documentation.
+ title: API Reference
+ description: |
+ # Introduction
+ Jan API is compatible with the [OpenAI API](https://platform.openai.com/docs/api-reference).
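+
+    For example, assuming the server is running locally on its default base URL (`http://localhost:1337/v1`, as used in the code samples below), an OpenAI-style chat completion can be requested with a sketch like this:
+
+    ```bash
+    curl -X POST http://localhost:1337/v1/chat/completions \
+      -H "Content-Type: application/json" \
+      -d '{
+        "model": "zephyr-7b",
+        "messages": [{"role": "user", "content": "hello"}]
+      }'
+    ```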
version: "0.1.8"
contact:
name: Jan Discord
@@ -14,45 +16,72 @@ servers:
tags:
- name: Models
description: List and describe the various models available in the API.
- - name: Chat Completion
- description: Given a list of messages comprising a conversation, the model will return a response.
+ - name: Chat
+ description: |
+ Given a list of messages comprising a conversation, the model will return a response.
+
- name: Messages
description: |
Messages capture a conversation's content. This can include the content from LLM responses and other metadata from [chat completions](/specs/chats).
-
- - Users and assistants can send multimedia messages.
-
- - An [OpenAI Message API](https://platform.openai.com/docs/api-reference/messages) compatible endpoint at `localhost:1337/v1/messages`.
-
- - Jan's `messages` API is compatible with [OpenAI's Messages API](https://platform.openai.com/docs/api-reference/messages), with additional methods for managing messages locally.
-
- name: Threads
- description: |
- Threads are conversations between an `assistant` and the user:
- - Users can tweak `model` params and `assistant` behavior within each thread.
- - Users can import and export threads.
- - An [OpenAI Thread API](https://platform.openai.com/docs/api-reference/threads) compatible endpoint at `localhost:1337/v1/threads`.
- name: Assistants
description: Configures and utilizes different AI assistants for varied tasks
x-tagGroups:
- name: Endpoints
tags:
- Models
- - Chat Completion
+ - Chat
- name: Chat
tags:
- Assistants
- Messages
- Threads
paths:
+ /chat/completions:
+ post:
+ operationId: createChatCompletion
+ tags:
+ - Chat
+ summary: |
+ Create chat completion
+ description: |
+ Creates a model response for the given chat conversation. Equivalent to OpenAI's create chat completion.
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: "specs/chat.yaml#/components/schemas/ChatCompletionRequest"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "specs/chat.yaml#/components/schemas/ChatCompletionResponse"
+ x-codeSamples:
+ - lang: "curl"
+ source: |
+            curl -X POST http://localhost:1337/v1/chat/completions \
+              -H "Content-Type: application/json" \
+              -d '{
+                "model": "zephyr-7b",
+                "messages": [
+                  {
+                    "role": "user",
+                    "content": "hello"
+                  }
+                ]
+              }'
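+        - lang: "curl"
+          label: "curl (streaming)"
+          source: |
+            # Sketch of a streaming request: "stream" is a field of ChatCompletionRequest;
+            # when enabled, the response is delivered incrementally.
+            curl -X POST http://localhost:1337/v1/chat/completions \
+              -H "Content-Type: application/json" \
+              -d '{
+                "model": "zephyr-7b",
+                "stream": true,
+                "messages": [{"role": "user", "content": "hello"}]
+              }'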
+
### MODELS
/models:
get:
operationId: listModels
tags:
- Models
- summary: List Models
- description: Lists the currently available models, and provides basic information about each one such as the owner and availability.
+ summary: List models
+ description: |
+        Lists the currently available models, and provides basic information about each one, such as the owner and availability. Equivalent to OpenAI's list models.
responses:
"200":
description: OK
@@ -68,8 +97,9 @@ paths:
operationId: downloadModel
tags:
- Models
- summary: Download Model
- description: Download a model.
+ summary: Download model
+ description: |
+ Download a model.
responses:
"200":
description: OK
@@ -83,20 +113,22 @@ paths:
curl -X POST https://localhost:1337/v1/models
/models/{model_id}:
get:
- operationId: getModel
+ operationId: retrieveModel
tags:
- Models
- summary: Get Model
- description: Get a model instance, providing basic information about the model such as the owner and permissioning.
+ summary: Retrieve model
+ description: |
+ Get a model instance, providing basic information about the model such as the owner and permissioning. Equivalent to OpenAI's retrieve model.
parameters:
- in: path
- name: source_url
+ name: model_id
required: true
schema:
type: string
# ideally this will be an actual ID, so this will always work from browser
- example: https://huggingface.com/thebloke/example.gguf
- description: The ID of the model to use for this request
+ example: zephyr-7b
+ description: |
+ The ID of the model to use for this request
responses:
"200":
description: OK
@@ -112,8 +144,9 @@ paths:
operationId: deleteModel
tags:
- Models
- summary: Delete Model
- description: Delete a model.
+ summary: Delete model
+ description: |
+ Delete a model. Equivalent to OpenAI's delete model.
parameters:
- in: path
name: model
@@ -121,7 +154,8 @@ paths:
schema:
type: string
example: zephyr-7b
- description: The model to delete
+ description: |
+ The model to delete
responses:
"200":
description: OK
@@ -138,8 +172,9 @@ paths:
operationId: startModel
tags:
- Models
- summary: Start Model
- description: Starts an imported model. Loads the model into V/RAM.
+ summary: Start model
+ description: |
+ Starts an imported model. Loads the model into V/RAM.
parameters:
- in: path
name: model
@@ -148,7 +183,8 @@ paths:
type: string
# ideally this will be an actual ID, so this will always work from browser
example: zephyr-7b
- description: The ID of the model to use for this request
+ description: |
+ The ID of the model to use for this request
responses:
"200":
description: OK
@@ -165,8 +201,9 @@ paths:
operationId: stopModel
tags:
- Models
- summary: Stop Model
- description: Stop an imported model.
+ summary: Stop model
+ description: |
+ Stop an imported model.
parameters:
- in: path
name: model
@@ -195,7 +232,8 @@ paths:
tags:
- Threads
summary: Create thread
- description: Create a thread
+ description: |
+ Create a thread. Equivalent to OpenAI's create thread.
requestBody:
required: false
content:
@@ -218,7 +256,7 @@ paths:
x-codeSamples:
- lang: "cURL"
source: |
- curl -X POST {JAN_URL}/v1/threads \
+ curl -X POST http://localhost:1337/v1/threads \
-H "Content-Type: application/json" \
-d '{
"messages": [{
@@ -235,7 +273,8 @@ paths:
tags:
- Threads
summary: List threads
- description: Retrieves a list of all threads available in the system.
+ description: |
+ Retrieves a list of all threads available in the system.
responses:
"200":
description: List of threads retrieved successfully
@@ -260,7 +299,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads \
+ curl http://localhost:1337/v1/threads \
-H "Content-Type: application/json" \
/threads/{thread_id}:
@@ -268,8 +307,9 @@ paths:
operationId: getThread
tags:
- Threads
- summary: Get thread
- description: Retrieves detailed information about a specific thread using its thread_id.
+ summary: Retrieve thread
+ description: |
+ Retrieves detailed information about a specific thread using its thread_id. Equivalent to OpenAI's retrieve thread.
parameters:
- in: path
name: thread_id
@@ -277,7 +317,9 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread to retrieve.
+ description: |
+ The ID of the thread to retrieve.
+
responses:
"200":
description: Thread details retrieved successfully
@@ -288,13 +330,14 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}
+ curl http://localhost:1337/v1/threads/{thread_id}
post:
operationId: modifyThread
tags:
- Threads
summary: Modify thread
- description: Modifies a thread
+ description: |
+ Modifies a thread. Equivalent to OpenAI's modify thread.
parameters:
- in: path
name: thread_id
@@ -302,7 +345,9 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread to be modified.
+ description: |
+ The ID of the thread to be modified.
+
requestBody:
required: false
content:
@@ -325,7 +370,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl -X POST {JAN_URL}/v1/threads/{thread_id} \
+ curl -X POST http://localhost:1337/v1/threads/{thread_id} \
-H "Content-Type: application/json" \
-d '{
"messages": [{
@@ -337,13 +382,13 @@ paths:
"content": "How does AI work? Explain it in simple terms."
}]
}'
-
delete:
operationId: deleteThread
tags:
- Threads
summary: Delete thread
- description: Delete a thread
+ description: |
+ Delete a thread. Equivalent to OpenAI's delete thread.
parameters:
- in: path
name: thread_id
@@ -351,7 +396,8 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread to be deleted.
+ description: |
+ The ID of the thread to be deleted.
responses:
"200":
description: Thread deleted successfully
@@ -362,8 +408,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl -X DELETE {JAN_URL}/v1/threads/{thread_id}
-
+ curl -X DELETE http://localhost:1337/v1/threads/{thread_id}
/threads/{thread_id}/assistants:
get:
@@ -371,8 +416,8 @@ paths:
tags:
- Threads
summary: Get Thread.Assistants
- description:
- - Can achieve this goal by calling Get thread API
+ description: |
+        This can be achieved by calling the Get Thread API.
parameters:
- in: path
name: thread_id
@@ -391,7 +436,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}/assistants
+ curl http://localhost:1337/v1/threads/{thread_id}/assistants
/threads/{thread_id}/assistants/{assistants_id}:
post:
@@ -399,8 +444,10 @@ paths:
tags:
- Threads
summary: Modify Thread.Assistants
- description:
- - Can achieve this goal by calling Modify Assistant API with thread.assistant[]
+ description: |
+
+
+        This can be achieved by calling the Modify Assistant API with thread.assistant[].
/threads/{thread_id}/:
get:
@@ -408,8 +455,8 @@ paths:
tags:
- Threads
summary: List Thread.Messages
- description:
- - Can achieve this goal by calling Get Thread API
+ description: |
+        This can be achieved by calling the Get Thread API.
parameters:
- in: path
name: thread_id
@@ -428,7 +475,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}
+ curl http://localhost:1337/v1/threads/{thread_id}
### MESSAGES
/threads/{thread_id}/messages:
@@ -436,8 +483,9 @@ paths:
operationId: listMessages
tags:
- Messages
- summary: List messaages
- description: Retrieves all messages from the given thread.
+ summary: List messages
+ description: |
+ Retrieves all messages from the given thread. Equivalent to OpenAI's list messages.
parameters:
- in: path
name: thread_id
@@ -445,7 +493,8 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread from which to retrieve messages.
+ description: |
+ The ID of the thread from which to retrieve messages.
responses:
"200":
description: List of messages retrieved successfully
@@ -456,14 +505,15 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}/messages \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages \
-H "Content-Type: application/json"
post:
operationId: createMessage
tags:
- Messages
summary: Create message
- description: Create a message
+ description: |
+        Create a message. Equivalent to OpenAI's create message.
parameters:
- in: path
name: thread_id
@@ -471,7 +521,8 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread to which the message will be posted.
+ description: |
+ The ID of the thread to which the message will be posted.
requestBody:
required: true
content:
@@ -481,12 +532,18 @@ paths:
properties:
role:
type: string
- description: "Role of the sender, either 'user' or 'assistant'."
+ description: |
+                    Role of the sender, either 'user' or 'assistant'.
+
+ OpenAI compatible
example: "user"
enum: ["user", "assistant"]
content:
type: string
- description: "Text content of the message."
+ description: |
+                    Text content of the message.
+
+ OpenAI compatible
example: "How does AI work? Explain it in simple terms."
required:
- role
@@ -501,7 +558,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl -X POST {JAN_URL}/v1/threads/{thread_id}/messages \
+ curl -X POST http://localhost:1337/v1/threads/{thread_id}/messages \
-H "Content-Type: application/json" \
-d '{
"role": "user",
@@ -513,8 +570,9 @@ paths:
operationId: retrieveMessage
tags:
- Messages
- summary: Retrieve Message
- description: Retrieve a specific message from a thread using its thread_id and message_id.
+ summary: Retrieve message
+ description: |
+        Retrieve a specific message from a thread using its thread_id and message_id. Equivalent to OpenAI's retrieve message.
parameters:
- in: path
name: thread_id
@@ -522,14 +580,18 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread containing the message.
+ description: |
+ The ID of the thread containing the message.
+
- in: path
name: message_id
required: true
schema:
type: string
example: msg_abc123
- description: The ID of the message to retrieve.
+ description: |
+ The ID of the message to retrieve.
+
responses:
"200":
description: OK
@@ -540,7 +602,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}/messages/{message_id} \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id} \
-H "Content-Type: application/json"
/threads/{thread_id}/messages/{message_id}/files:
@@ -548,8 +610,10 @@ paths:
operationId: listMessageFiles
tags:
- Messages
- summary: List message files
- description: Returns a list of message files.
+      summary: List message files
+ description: |
+ Returns a list of message files. Equivalent to OpenAI's list message files.
parameters:
- in: path
name: thread_id
@@ -557,14 +621,18 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread containing the message.
+ description: |
+ The ID of the thread containing the message.
+
- in: path
name: message_id
required: true
schema:
type: string
example: msg_abc123
- description: The ID of the message whose files are to be listed.
+ description: |
+ The ID of the message whose files are to be listed.
+
responses:
"200":
description: List of files retrieved successfully
@@ -575,7 +643,7 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}/messages/{message_id}/files \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files \
-H "Content-Type: application/json"
/threads/{thread_id}/messages/{message_id}/files/{file_id}:
@@ -584,7 +652,8 @@ paths:
tags:
- Messages
summary: Retrieve message file
- description: Retrieves a file associated with a specific message in a thread.
+ description: |
+ Retrieves a file associated with a specific message in a thread. Equivalent to OpenAI's retrieve message file.
parameters:
- in: path
name: thread_id
@@ -592,21 +661,27 @@ paths:
schema:
type: string
example: thread_abc123
- description: The ID of the thread containing the message.
+ description: |
+ The ID of the thread containing the message.
+
- in: path
name: message_id
required: true
schema:
type: string
example: msg_abc123
- description: The ID of the message associated with the file.
+ description: |
+ The ID of the message associated with the file.
+
- in: path
name: file_id
required: true
schema:
type: string
example: file-abc123
- description: The ID of the file to retrieve.
+ description: |
+ The ID of the file to retrieve.
+
responses:
"200":
description: File retrieved successfully
@@ -617,14 +692,15 @@ paths:
x-codeSamples:
- lang: "curl"
source: |
- curl {JAN_URL}/v1/threads/{thread_id}/messages/{message_id}/files/{file_id} \
+ curl http://localhost:1337/v1/threads/{thread_id}/messages/{message_id}/files/{file_id} \
-H "Content-Type: application/json"
x-webhooks:
ModelObject:
post:
summary: The model object
- description: Information about a model in the systems
+ description: |
+        Describes a model offering that can be used with the API. Equivalent to OpenAI's model object.
operationId: ModelObject
tags:
- Models
@@ -633,11 +709,11 @@ x-webhooks:
application/json:
schema:
$ref: 'specs/models.yaml#/components/schemas/ModelObject'
-
MessageObject:
post:
summary: The message object
- description: Information about a message in the thread
+ description: |
+ Information about a message in the thread. Equivalent to OpenAI's message object.
operationId: MessageObject
tags:
- Messages
@@ -646,15 +722,10 @@ x-webhooks:
application/json:
schema:
$ref: 'specs/messages.yaml#/components/schemas/MessageObject'
-
ThreadObject:
post:
summary: The thread object
- description: |
- - Each `thread` folder contains a `thread.json` file, which is a representation of a thread.
- - `thread.json` contains metadata and model parameter overrides.
- - There are no required fields.
-
+ description: Represents a thread that contains messages. Equivalent to OpenAI's thread object.
operationId: ThreadObject
tags:
- Threads
diff --git a/docs/openapi/specs/assistant.yaml b/docs/openapi/specs/assistant.yaml
new file mode 100644
index 000000000..95b4cd22d
--- /dev/null
+++ b/docs/openapi/specs/assistant.yaml
@@ -0,0 +1,59 @@
+AssistantObject:
+ type: object
+ properties:
+ avatar:
+ type: string
+ description: "URL of the assistant's avatar. Jan-specific property."
+ example: "https://lala.png"
+ id:
+ type: string
+ description: "The identifier of the assistant."
+ example: "asst_abc123"
+ object:
+ type: string
+ description: "Type of the object, indicating it's an assistant."
+ default: "assistant"
+ version:
+ type: integer
+ description: "Version number of the assistant."
+ example: 1
+ created_at:
+ type: integer
+ format: int64
+ description: "Unix timestamp representing the creation time of the assistant."
+ name:
+ type: string
+ description: "Name of the assistant."
+ example: "Math Tutor"
+ description:
+ type: string
+ description: "Description of the assistant. Can be null."
+ models:
+ type: array
+ description: "List of models associated with the assistant. Jan-specific property."
+ items:
+ type: object
+ properties:
+ model_id:
+ type: string
+ # Additional properties for models can be added here
+ events:
+ type: object
+ description: "Event subscription settings for the assistant."
+ properties:
+ in:
+ type: array
+ items:
+ type: string
+ out:
+ type: array
+ items:
+ type: string
+ # If there are specific event types, they can be detailed here
+ metadata:
+ type: object
+ description: "Metadata associated with the assistant."
+ required:
+ - name
+ - models
+ - events
\ No newline at end of file
diff --git a/docs/openapi/specs/chat.yaml b/docs/openapi/specs/chat.yaml
new file mode 100644
index 000000000..05444c444
--- /dev/null
+++ b/docs/openapi/specs/chat.yaml
@@ -0,0 +1,181 @@
+components:
+ schemas:
+ ChatObject:
+ type: object
+ properties:
+ messages:
+          type: array
+ description: |
+ Contains input data or prompts for the model to process
+ example:
+ [
+ { "content": "Hello there :wave:", "role": "assistant" },
+ { "content": "Can you write a long story", "role": "user" },
+ ]
+ stream:
+ type: boolean
+ default: true
+ description: Enables continuous output generation, allowing for streaming of model responses
+ model:
+ type: string
+ example: "gpt-3.5-turbo"
+ description: Specifies the model being used for inference or processing tasks
+ max_tokens:
+ type: number
+ default: 2048
+ description: The maximum number of tokens the model will generate in a single response
+ stop:
+          type: array
+ example: ["hello"]
+ description: Defines specific tokens or phrases at which the model will stop generating further output
+ frequency_penalty:
+ type: number
+ default: 0
+ description: Adjusts the likelihood of the model repeating words or phrases in its output
+ presence_penalty:
+ type: number
+ default: 0
+ description: Influences the generation of new and varied concepts in the model's output
+ temperature:
+ type: number
+ default: 0.7
+          minimum: 0
+          maximum: 1
+ description: Controls the randomness of the model's output
+ top_p:
+ type: number
+ default: 0.95
+          minimum: 0
+          maximum: 1
+ description: Set probability threshold for more relevant outputs
+ cache_prompt:
+ type: boolean
+ default: true
+ description: Optimize performance in repeated or similar requests.
+ ChatCompletionRequest:
+ type: object
+ properties:
+ messages:
+          type: array
+ description: |
+ Contains input data or prompts for the model to process
+ example:
+ [
+ { "content": "Hello there :wave:", "role": "assistant" },
+ { "content": "Can you write a long story", "role": "user" },
+ ]
+ model:
+ type: string
+ example: model-zephyr-7B
+ description: |
+ Specifies the model being used for inference or processing tasks
+ stream:
+ type: boolean
+ default: true
+ description: |
+ Enables continuous output generation, allowing for streaming of model responses
+ max_tokens:
+ type: number
+ default: 2048
+ description: |
+ The maximum number of tokens the model will generate in a single response
+ stop:
+          type: array
+ example: ["hello"]
+ description: |
+ Defines specific tokens or phrases at which the model will stop generating further output
+ frequency_penalty:
+ type: number
+ default: 0
+ description: |
+ Adjusts the likelihood of the model repeating words or phrases in its output
+ presence_penalty:
+ type: number
+ default: 0
+ description: |
+ Influences the generation of new and varied concepts in the model's output
+ temperature:
+ type: number
+ default: 0.7
+          minimum: 0
+          maximum: 1
+ description: |
+ Controls the randomness of the model's output
+ top_p:
+ type: number
+ default: 0.95
+          minimum: 0
+          maximum: 1
+ description: |
+ Set probability threshold for more relevant outputs
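+      # Illustrative request body (a sketch assembled from the property examples above;
+      # values are placeholders rather than required defaults).
+      example:
+        model: model-zephyr-7B
+        stream: true
+        max_tokens: 2048
+        messages:
+          - content: "Hello there :wave:"
+            role: assistant
+          - content: "Can you write a long story"
+            role: user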
+
+
+
+ ChatCompletionResponse:
+ type: object
+      description: The response body returned for a chat completion request
+ properties:
+ choices:
+ type: array
+ description: Array of choice objects
+ items:
+ type: object
+ properties:
+ finish_reason:
+ type: string
+ nullable: true
+ example: null
+ description: Reason for finishing the response, if applicable
+ index:
+ type: integer
+ example: 0
+ description: Index of the choice
+ message:
+ type: object
+ properties:
+ content:
+ type: string
+ example: "Hello user. What can I help you with?"
+ description: Content of the message
+ role:
+ type: string
+ example: assistant
+ description: Role of the sender
+ created:
+ type: integer
+ example: 1700193928
+ description: Timestamp of when the response was created
+ id:
+ type: string
+ example: ebwd2niJvJB1Q2Whyvkz
+ description: Unique identifier of the response
+ model:
+ type: string
+ nullable: true
+ example: _
+ description: Model used for generating the response
+ object:
+ type: string
+ example: chat.completion
+ description: Type of the response object
+ system_fingerprint:
+ type: string
+ nullable: true
+ example: _
+ description: System fingerprint
+ usage:
+ type: object
+ description: Information about the usage of tokens
+ properties:
+ completion_tokens:
+ type: integer
+ example: 500
+ description: Number of tokens used for completion
+ prompt_tokens:
+ type: integer
+ example: 33
+ description: Number of tokens used in the prompt
+ total_tokens:
+ type: integer
+ example: 533
+ description: Total number of tokens used
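+      # Illustrative response body (a sketch assembled from the field examples above).
+      example:
+        id: ebwd2niJvJB1Q2Whyvkz
+        object: chat.completion
+        created: 1700193928
+        model: _
+        system_fingerprint: _
+        choices:
+          - index: 0
+            finish_reason: null
+            message:
+              role: assistant
+              content: "Hello user. What can I help you with?"
+        usage:
+          completion_tokens: 500
+          prompt_tokens: 33
+          total_tokens: 533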
diff --git a/docs/openapi/specs/messages.yaml b/docs/openapi/specs/messages.yaml
index daa998767..b99fc4221 100644
--- a/docs/openapi/specs/messages.yaml
+++ b/docs/openapi/specs/messages.yaml
@@ -5,28 +5,35 @@ components:
properties:
id:
type: string
- description: "Sequential or UUID identifier of the message."
+ description: |
+            Sequential or UUID identifier of the message.
example: 0
object:
type: string
- description: "Type of the object, defaults to 'thread.message'."
+ description: |
+            Type of the object, defaults to 'thread.message'.
example: thread.message
created_at:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the message."
+ description: |
+            Unix timestamp representing the creation time of the message.
thread_id:
type: string
- description: "Identifier of the thread to which this message belongs. Defaults to parent thread."
+ description: |
+            Identifier of the thread to which this message belongs. Defaults to parent thread.
example: "thread_asdf"
assistant_id:
type: string
- description: "Identifier of the assistant involved in the message. Defaults to parent thread."
+ description: |
+            Identifier of the assistant involved in the message. Defaults to parent thread.
example: jan
role:
type: string
enum: ["user", "assistant"]
- description: "Role of the sender, either 'user' or 'assistant'."
+ description: |
+            Role of the sender, either 'user' or 'assistant'.
+
content:
type: array
items:
@@ -34,23 +41,30 @@ components:
properties:
type:
type: string
- description: "Type of content, e.g., 'text'."
+ description: |
+                  Type of content, e.g., 'text'.
+
+
text:
type: object
properties:
value:
type: string
- description: "Text content of the message."
+ description: |
+                      Text content of the message.
example: "Hi!?"
annotations:
type: array
items:
type: string
- description: "Annotations for the text content, if any."
+ description: |
+                      Annotations for the text content, if any.
example: []
metadata:
type: object
- description: "Metadata associated with the message, defaults to an empty object."
+ description: |
+            Metadata associated with the message, defaults to an empty object.
+
example: {}
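+      # Illustrative message object (a sketch assembled from the field examples above).
+      example:
+        id: 0
+        object: thread.message
+        thread_id: thread_asdf
+        assistant_id: jan
+        role: assistant
+        content:
+          - type: text
+            text:
+              value: "Hi!?"
+              annotations: []
+        metadata: {}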
GetMessageResponse:
diff --git a/docs/openapi/specs/models.yaml b/docs/openapi/specs/models.yaml
index 6f136d30e..aa5cc4155 100644
--- a/docs/openapi/specs/models.yaml
+++ b/docs/openapi/specs/models.yaml
@@ -117,26 +117,29 @@ components:
properties:
id:
type: string
- description: "The identifier of the model."
+ description: |
+            The identifier of the model.
+
example: "zephyr-7b"
object:
type: string
- description: "The type of the object, indicating it's a model."
+ description: |
+            The type of the object, indicating it's a model.
+
default: "model"
created:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the model."
+ description: |
+            Unix timestamp representing the creation time of the model.
+
example: "1253935178"
owned_by:
type: string
- description: "The entity that owns the model."
+ description: |
+            The entity that owns the model.
+
example: "_"
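+      # Illustrative model object (a sketch assembled from the field examples above).
+      example:
+        id: zephyr-7b
+        object: model
+        created: 1253935178
+        owned_by: "_"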
- required:
- - id
- - object
- - created
- - owned_by
GetModelResponse:
type: object
@@ -253,10 +256,7 @@ components:
type: boolean
description: "Indicates whether the model was successfully deleted."
example: true
- required:
- - id
- - object
- - deleted
+
StartModelResponse:
type: object
@@ -313,8 +313,4 @@ components:
type: string
description: "The current state of the model after the start operation."
example: "downloaded"
- required:
- - id
- - object
- - state
diff --git a/docs/openapi/specs/threads.yaml b/docs/openapi/specs/threads.yaml
index 317447f90..c009381e3 100644
--- a/docs/openapi/specs/threads.yaml
+++ b/docs/openapi/specs/threads.yaml
@@ -5,47 +5,69 @@ components:
properties:
id:
type: string
- description: "The identifier of the thread, defaults to foldername."
+ description: |
+            The identifier of the thread, defaults to foldername.
+
example: thread_....
object:
type: string
- description: "Type of the object, defaults to thread."
+ description: |
+            Type of the object, defaults to thread.
+
example: thread
- summary:
+ title:
type: string
- description: "A brief summary or description of the thread, defaults to an empty string."
+ description: |
+            A brief summary or description of the thread, defaults to an empty string.
+
+
example: "funny physics joke"
assistants:
type: array
+ description: |
+            List of assistants involved in the thread.
items:
- type: string
- description: "List of assistants involved in the thread, defaults to [\"jan\"]."
- example: ["jan"]
+ properties:
+ assistant_id:
+ type: string
+ description: |
+                The identifier of the assistant, defaults to "jan".
+
+
+ example: jan
+ model:
+ type: object
+ properties:
+ id:
+ type: string
+ description: |
+                      The model identifier, defaults to the assistant's model.
+ example: ...
+ settings:
+ type: object
+ description: |
+ Defaults to and overrides assistant.json's "settings" (and if none, then model.json "settings")
+
+
+ parameters:
+ type: object
+ description: |
+ Defaults to and overrides assistant.json's "parameters" (and if none, then model.json "parameters")
+
+
created:
type: integer
format: int64
- description: "Unix timestamp representing the creation time of the thread, defaults to file creation time."
+ description: |
+            Unix timestamp representing the creation time of the thread, defaults to file creation time.
+
example: 1231231
metadata:
type: object
- description: "Metadata associated with the thread, defaults to an empty object."
+ description: |
+            Metadata associated with the thread, defaults to an empty object.
+
example: {}
- messages:
- type: array
- description: "List of messages within the thread."
- items:
- type: string
- example: []
- model_id:
- type: string
- description: "Model identifier associated with the thread, defaults to assistant.model."
- example: "..."
- settings:
- type: object
- description: "Settings for the thread, defaults to and overrides assistant.settings."
- parameters:
- type: object
- description: "Parameters for the thread, defaults to and overrides assistant.settings."
GetThreadResponse:
type: object
@@ -106,23 +128,30 @@ components:
properties:
role:
type: string
- description: "Role of the sender, either 'user' or 'assistant'."
+ description: |
+            Role of the sender, either 'user' or 'assistant'.
+
enum: ["user", "assistant"]
content:
type: string
- description: "Text content of the message."
+ description: |
+            Text content of the message.
+
file_ids:
type: array
items:
type: string
- description: "Array of file IDs associated with the message, if any."
+ description: |
+            Array of file IDs associated with the message, if any.
+
ModifyThreadResponse:
type: object
properties:
id:
type: string
- description: "The identifier of the modified thread."
+ description: |
+            The identifier of the modified thread.
example: thread_abc123
object:
type: string
diff --git a/docs/src/styles/base.scss b/docs/src/styles/base.scss
index a59df6f73..b94081d66 100644
--- a/docs/src/styles/base.scss
+++ b/docs/src/styles/base.scss
@@ -1,4 +1,5 @@
@layer base {
+
html[data-theme="light"] {
--ifm-background-color: white;
--ifm-color-primary: #2563eb; /* New Primary Blue */
@@ -39,4 +40,16 @@
text-decoration: none;
}
}
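+  /* Inline badge, e.g. for the "OpenAI compatible" labels used in endpoint descriptions */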
+ compatible-label {
+ display: inline-block;
+ padding: 2px 8px;
+ margin: 0;
+ background-color: #228B22;
+ color: #000;
+ font-size: 13px;
+ vertical-align: middle;
+ line-height: 1.6;
+ border-radius: 4px;
+ font-weight: var(--ifm-font-weight-bold);
+  }
}