From 9dcb460137acf8f909d95c7b36315f8df8a95d0c Mon Sep 17 00:00:00 2001
From: Louis <133622055+louis-jan@users.noreply.github.com>
Date: Thu, 7 Sep 2023 15:40:30 +0700
Subject: [PATCH] chore: update seed data for a generic JanGPT model (#80)

* chore: update seed data for a generic JanGPT model

* chore: update banner
---
 app-backend/hasura/seeds/jandb/1692710119690_productsSeed.sql | 2 +-
 .../hasura/seeds/jandb/1692710371524_collectionProductsSeed.sql | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/app-backend/hasura/seeds/jandb/1692710119690_productsSeed.sql b/app-backend/hasura/seeds/jandb/1692710119690_productsSeed.sql
index 7784aaaa5..82fbde406 100644
--- a/app-backend/hasura/seeds/jandb/1692710119690_productsSeed.sql
+++ b/app-backend/hasura/seeds/jandb/1692710119690_productsSeed.sql
@@ -1,4 +1,4 @@
 SET check_function_bodies = false;
 INSERT INTO public.products ("slug", "name", "nsfw", "image_url", "description", "long_description", "technical_description", "author", "version", "source_url", "inputs", "outputs", "greeting") VALUES
-('llama2', 'Llama-2-7B-Chat', 't', 'https://static-assets.jan.ai/llama2.jpg','Llama 2 is Meta`s open source large language model (LLM)', 'Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model. Links to other models can be found in the index at the bottom.', 'Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.', 'Meta', 'Llama2-7B-GGML', 'https://huggingface.co/TheBloke/airoboros-13B-gpt4-1.4-GGML', '{"body": [{"name": "messages", "type": "array", "items": [{"type": "object", "properties": [{"name": "role", "type": "string", "example": "system", "description": "Defines the role of the message."}, {"name": "content", "type": "string", "example": "Hello, world!", "description": "Contains the content of the message."}]}], "description": "An array of messages, each containing a role and content. The latest message is always at the end of the array."}, {"name": "stream", "type": "boolean", "example": true, "description": "Indicates whether the client wants to keep the connection open for streaming."}, {"name": "max_tokens", "type": "integer", "example": 500, "description": "Defines the maximum number of tokens that the client wants to receive."}], "slug": "llm", "headers": {"accept": "text/event-stream", "content-type": "application/json"}}', '{"slug": "llm", "type": "object", "properties": [{"name": "id", "type": "string", "example": "chatcmpl-4c4e5eb5-bf53-4dbc-9136-1cf69fc5fd7c", "description": "The unique identifier of the chat completion chunk."}, {"name": "model", "type": "string", "example": "gpt-3.5-turbo", "description": "The name of the GPT model used to generate the completion."}, {"name": "created", "type": "integer", "example": 1692169988, "description": "The Unix timestamp representing the time when the completion was generated."}, {"name": "object", "type": "string", "example": "chat.completion.chunk", "description": "A string indicating the type of the chat completion chunk."}, {"name": "choices", "type": "array", "items": [{"type": "object", "properties": [{"name": "index", "type": "integer", "example": 0, "description": "The index of the choice made by the GPT model."}, {"name": "delta", "type": "object", "properties": [{"name": "content", "type": "string", "example": "What", "description": "The content generated by the GPT model."}], "description": "A JSON object containing the content generated by the GPT model."}, {"name": "finish_reason", "type": "string", "example": null, "description": "A string indicating why the GPT model stopped generating content."}]}], "description": "An array containing the choices made by the GPT model to generate the completion."}], "description": "A JSON object representing a chat completion chunk."}', 'šŸ‘‹I’m a versatile AI trained on a wide range of topics, here to answer your questions about the universe. What are you curious about today?')
+('jangpt', 'JanGPT', 't', 'https://static-assets.jan.ai/jan-banner.jpg','Self-hosted, local, AI Inference Platform that scales from personal use to production deployments for a team.', '', '', '', '1.0.0', 'https://cloud.jan.ai', '{"body": [{"name": "messages", "type": "array", "items": [{"type": "object", "properties": [{"name": "role", "type": "string", "example": "system", "description": "Defines the role of the message."}, {"name": "content", "type": "string", "example": "Hello, world!", "description": "Contains the content of the message."}]}], "description": "An array of messages, each containing a role and content. The latest message is always at the end of the array."}, {"name": "stream", "type": "boolean", "example": true, "description": "Indicates whether the client wants to keep the connection open for streaming."}, {"name": "max_tokens", "type": "integer", "example": 500, "description": "Defines the maximum number of tokens that the client wants to receive."}], "slug": "llm", "headers": {"accept": "text/event-stream", "content-type": "application/json"}}', '{"slug": "llm", "type": "object", "properties": [{"name": "id", "type": "string", "example": "chatcmpl-4c4e5eb5-bf53-4dbc-9136-1cf69fc5fd7c", "description": "The unique identifier of the chat completion chunk."}, {"name": "model", "type": "string", "example": "gpt-3.5-turbo", "description": "The name of the GPT model used to generate the completion."}, {"name": "created", "type": "integer", "example": 1692169988, "description": "The Unix timestamp representing the time when the completion was generated."}, {"name": "object", "type": "string", "example": "chat.completion.chunk", "description": "A string indicating the type of the chat completion chunk."}, {"name": "choices", "type": "array", "items": [{"type": "object", "properties": [{"name": "index", "type": "integer", "example": 0, "description": "The index of the choice made by the GPT model."}, {"name": "delta", "type": "object", "properties": [{"name": "content", "type": "string", "example": "What", "description": "The content generated by the GPT model."}], "description": "A JSON object containing the content generated by the GPT model."}, {"name": "finish_reason", "type": "string", "example": null, "description": "A string indicating why the GPT model stopped generating content."}]}], "description": "An array containing the choices made by the GPT model to generate the completion."}], "description": "A JSON object representing a chat completion chunk."}', 'šŸ‘‹I’m a versatile AI trained on a wide range of topics, here to answer your questions about the universe. What are you curious about today?')
 ON CONFLICT (slug) DO NOTHING;
diff --git a/app-backend/hasura/seeds/jandb/1692710371524_collectionProductsSeed.sql b/app-backend/hasura/seeds/jandb/1692710371524_collectionProductsSeed.sql
index 5fbd08e1b..dc0b0ebe5 100644
--- a/app-backend/hasura/seeds/jandb/1692710371524_collectionProductsSeed.sql
+++ b/app-backend/hasura/seeds/jandb/1692710371524_collectionProductsSeed.sql
@@ -3,4 +3,4 @@ SET check_function_bodies = false;
 INSERT INTO public.collection_products (collection_id, product_id)
 SELECT (SELECT id FROM public.collections WHERE slug = 'conversational') AS collection_id, id AS product_id
 FROM public.products
-WHERE slug IN ('llama2') ON CONFLICT (collection_id, product_id) DO NOTHING;;
+WHERE slug IN ('jangpt') ON CONFLICT (collection_id, product_id) DO NOTHING;;
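
Once the updated seeds have been applied, the change can be sanity-checked with a small query. This is a minimal sketch, assuming the seeds above have been loaded into the jandb database; it is not part of the patch and only joins the tables the patch touches to confirm that the generic 'jangpt' product exists and is linked to the 'conversational' collection.

-- Hypothetical verification query (not part of this patch): checks that the
-- seeded 'jangpt' product is attached to the 'conversational' collection.
SELECT p.slug, p.name, c.slug AS collection_slug
FROM public.products p
JOIN public.collection_products cp ON cp.product_id = p.id
JOIN public.collections c ON c.id = cp.collection_id
WHERE p.slug = 'jangpt';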