diff --git a/docs/openapi/jan.yaml b/docs/openapi/jan.yaml index 44168014b..b7b6bd3c1 100644 --- a/docs/openapi/jan.yaml +++ b/docs/openapi/jan.yaml @@ -67,21 +67,18 @@ paths: x-codeSamples: - lang: cURL source: > - curl http://localhost:1337/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "tinyllama-1.1b", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Hello!" - } - ] - }' + curl -X POST + 'http://localhost:3982/inferences/llamacpp/chat_completion' \ + -H "Content-Type: application/json" \ + -d '{ + "llama_model_path": "/path/to/your/model.gguf", + "messages": [ + { + "role": "user", + "content": "hello" + } + ] + }' /models: get: operationId: listModels diff --git a/docs/openapi/specs/chat.yaml b/docs/openapi/specs/chat.yaml index 48b863ed1..9303e9713 100644 --- a/docs/openapi/specs/chat.yaml +++ b/docs/openapi/specs/chat.yaml @@ -71,13 +71,13 @@ components: description: | Contains input data or prompts for the model to process. example: - - content: "You are a helpful assistant." - role: system - - content: Hello! + - content: "Hello there :wave:" + role: assistant + - content: Can you write a long story role: user model: type: string - example: tinyllama-1.1b + example: model-zephyr-7B description: | Specifies the model being used for inference or processing tasks. stream: