jan/docker-compose.yml
hiento09 90aa721e7d
Update docs (#15)
* fix: not every llm stream chunked by each json data

* Docs: deploy docusaurus github page and update README.md (#14)

* add github action deploy docusaurus to github page

* README: update installation instruction

* Add sonarqube scanner github actions pipeline

---------

Co-authored-by: Hien To <>

---------

Co-authored-by: Louis <louis@jan.ai>
2023-08-30 11:19:25 +07:00

215 lines
6.2 KiB
YAML

# Docker Compose stack for the Jan community deployment:
# Keycloak (auth) + Hasura GraphQL (app backend) + web client + LLM inference.
# NOTE: the top-level "version" key is obsolete in Compose v2 but harmless.
version: "3"

services:
  # Keycloak identity provider; realm config is imported from ./conf/keycloak_conf.
  keycloak:
    image: quay.io/keycloak/keycloak:${KEYCLOAK_VERSION:-22.0.0}
    command: ["start-dev", "--import-realm", "--http-port", "8088"]
    environment:
      KC_DB: postgres
      KC_DB_URL_HOST: keycloak_postgres
      KC_DB_URL_DATABASE: ${POSTGRES_DB_NAME:-keycloak}
      KC_DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      KC_DB_USERNAME: ${POSTGRES_USERNAME:-postgres}
      KC_DB_SCHEMA: ${KC_DB_SCHEMA:-public}
      KC_HEALTH_ENABLED: 'true'
      KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN:-admin}
      KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD:-admin}
    volumes:
      - ./conf/keycloak_conf:/opt/keycloak/data/import
    ports:
      - "8088:8088"
    depends_on:
      keycloak_postgres:
        condition: service_healthy
    networks:
      jan_community:
        ipv4_address: 172.20.0.9

  # Dedicated Postgres instance for Keycloak.
  keycloak_postgres:
    image: postgres:15
    # Fix: the original "postgres -c '...' && postgres -c '...'" passed "&&"
    # as a literal argument (no shell runs this command). Both settings now
    # go to a single server process.
    command: postgres -c max_connections=200 -c shared_buffers=24MB
    environment:
      # Environment variables especially for Postgres
      POSTGRES_DB: ${POSTGRES_DB_NAME:-keycloak}
      POSTGRES_USER: ${POSTGRES_USERNAME:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      PGDATA: /data/postgres
      PGPORT: ${POSTGRES_PORT:-5432}
    healthcheck:
      # Fix: "exit 0" reported healthy immediately, so keycloak's
      # "condition: service_healthy" never actually waited for Postgres.
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USERNAME:-postgres} -d ${POSTGRES_DB_NAME:-keycloak}"]
      interval: 5s
      timeout: 5s
      retries: 10
    ports:
      - "${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}"
    networks:
      jan_community:
        ipv4_address: 172.20.0.10

  # Application-backend Postgres (used by Hasura).
  postgres:
    image: postgres:15
    restart: always
    env_file:
      - conf/sample.env_app-backend-postgres
    networks:
      jan_community:
        ipv4_address: 172.20.0.11

  # Hasura GraphQL engine; applies migrations/metadata on start.
  graphql-engine:
    image: hasura/graphql-engine:v2.31.0.cli-migrations-v3
    ports:
      - "8080:8080"
    restart: always
    env_file:
      - conf/sample.env_app-backend
    volumes:
      - ./app-backend/hasura/migrations:/migrations
      - ./app-backend/hasura/metadata:/metadata
    depends_on:
      data-connector-agent:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/healthz"]
      interval: 30s
      timeout: 10s
      retries: 10
    networks:
      jan_community:
        ipv4_address: 172.20.0.12

  # One-shot job: applies Hasura seed data once graphql-engine is healthy.
  hasura-seed-apply:
    image: hasura/graphql-engine:v2.31.0.cli-migrations-v3
    entrypoint: [""]
    command: ["/bin/sh", "-c", "hasura-cli seed apply --all-databases"]
    env_file:
      - conf/sample.env_app-backend
    volumes:
      - ./app-backend/hasura/config.yaml:/config.yaml
      - ./app-backend/hasura/seeds:/seeds
    depends_on:
      graphql-engine:
        condition: service_healthy
    # hasura-cli targets "localhost"; point it at the graphql-engine address.
    extra_hosts:
      - "localhost:172.20.0.12"
    networks:
      jan_community:

  # Background worker for the app backend (dev mode, source bind-mounted).
  worker:
    build:
      context: ./app-backend/worker
      dockerfile: ./Dockerfile
    restart: always
    environment:
      - "NODE_ENV=development"
    volumes:
      - ./app-backend/worker:/worker
    ports:
      - "8787:8787"
    networks:
      jan_community:
        ipv4_address: 172.20.0.13

  # Hasura data-connector agent (required by graphql-engine).
  data-connector-agent:
    image: hasura/graphql-data-connector:v2.31.0
    restart: always
    ports:
      - "8081:8081"
    environment:
      QUARKUS_LOG_LEVEL: ERROR  # FATAL, ERROR, WARN, INFO, DEBUG, TRACE
      ## https://quarkus.io/guides/opentelemetry#configuration-reference
      QUARKUS_OPENTELEMETRY_ENABLED: "false"
      ## QUARKUS_OPENTELEMETRY_TRACER_EXPORTER_OTLP_ENDPOINT: http://jaeger:4317
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8081/api/v1/athena/health"]
      interval: 5s
      timeout: 10s
      retries: 5
      start_period: 5s
    networks:
      jan_community:
        ipv4_address: 172.20.0.14

  # Web client (Next.js dev server, source bind-mounted).
  web:
    build:
      context: ./web-client
      dockerfile: ./dev.Dockerfile
    container_name: jan_web
    restart: always
    volumes:
      - ./web-client/:/app
      # Anonymous volumes keep container-built node_modules/.next out of the bind mount.
      - /app/node_modules
      - /app/.next
    env_file:
      - conf/sample.env_web-client
    ports:
      - "3000:3000"
    environment:
      NODE_ENV: development
    # The client addresses Keycloak as "localhost"; map it to the keycloak IP.
    extra_hosts:
      - "localhost:172.20.0.9"
    networks:
      jan_community:
        ipv4_address: 172.20.0.15

  # Service to download a model file.
  downloader:
    image: busybox
    # Extracts the model filename from MODEL_URL and downloads it if absent.
    # Fix: shell-side "$" escaped as "$$" so Compose does not interpolate
    # LLM_MODEL_FILE / $( ) from the host environment; ${MODEL_URL} is still
    # substituted by Compose as intended.
    command: /bin/sh -c "LLM_MODEL_FILE=$$(basename ${MODEL_URL}); if [ ! -f /models/$$LLM_MODEL_FILE ]; then wget -O /models/$$LLM_MODEL_FILE ${MODEL_URL}; fi"
    # Mount a local directory to store the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models
    networks:
      jan_community:
        ipv4_address: 172.20.0.16

  # Service to wait for the downloader service to finish downloading the model.
  wait-for-downloader:
    image: busybox
    # Waits until the model file (specified in MODEL_URL) exists.
    # Same "$$" escaping as the downloader service.
    command: /bin/sh -c "LLM_MODEL_FILE=$$(basename ${MODEL_URL}); echo 'Waiting for downloader to finish'; while [ ! -f /models/$$LLM_MODEL_FILE ]; do sleep 1; done; echo 'Model downloaded!'"
    # Start after the downloader service has started.
    depends_on:
      downloader:
        condition: service_started
    # Mount the same local directory to check for the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models
    networks:
      jan_community:
        ipv4_address: 172.20.0.17

  # Service to run the Llama web application.
  llm:
    image: ghcr.io/abetlen/llama-cpp-python:latest
    # Mount the directory that contains the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models
    ports:
      - "8000:8000"
    environment:
      # Path to the model inside the container.
      # NOTE(review): this hardcoded filename must match the basename of
      # MODEL_URL used by the downloader service — verify when changing MODEL_URL.
      MODEL: /models/llama-2-7b-chat.ggmlv3.q4_1.bin
      PYTHONUNBUFFERED: "1"
    # Restart policy configuration
    restart: on-failure
    # Start only after wait-for-downloader has completed successfully.
    depends_on:
      wait-for-downloader:
        condition: service_completed_successfully
    networks:
      jan_community:
        ipv4_address: 172.20.0.18

# Single bridge network with static addressing for the services above.
networks:
  jan_community:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.20.0.0/16