docs: standardize Chat Payload and cURL

This commit is contained in:
Ho Duc Hieu 2024-01-05 08:05:10 +07:00
parent 2f1fe6cb67
commit 24e7c1213c
2 changed files with 19 additions and 16 deletions

View File

@@ -67,16 +67,19 @@ paths:
x-codeSamples:
- lang: cURL
source: >
curl -X POST
'http://localhost:3982/inferences/llamacpp/chat_completion' \
curl http://localhost:1337/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"llama_model_path": "/path/to/your/model.gguf",
"model": "tinyllama-1.1b",
"messages": [
{
"role": "user",
"content": "hello"
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello!"
}
]
}'
/models:

View File

@@ -71,13 +71,13 @@ components:
description: |
Contains input data or prompts for the model to process.
example:
- content: "Hello there :wave:"
role: assistant
- content: Can you write a long story
- content: "You are a helpful assistant."
role: system
- content: Hello!
role: user
model:
type: string
example: model-zephyr-7B
example: tinyllama-1.1b
description: |
Specifies the model being used for inference or processing tasks.
stream: