# Docker Compose file format version
version: "3"

# volumes:
#   keycloak_postgres_data:
#   db_data:

services:
  keycloak:
    image: quay.io/keycloak/keycloak:${KEYCLOAK_VERSION-22.0.0}
    command: ["start-dev", "--import-realm", "--http-port", "8088"]
    environment:
      KC_DB: postgres
      KC_DB_URL_HOST: keycloak_postgres
      KC_DB_URL_DATABASE: ${POSTGRES_DB_NAME:-keycloak}
      KC_DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      KC_DB_USERNAME: ${POSTGRES_USERNAME:-postgres}
      KC_DB_SCHEMA: ${KC_DB_SCHEMA:-public}
      KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN-admin}
      KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD-admin}
    volumes:
      - ./conf/keycloak_conf:/opt/keycloak/data/import
    ports:
      - "8088:8088"
    depends_on:
      keycloak_postgres:
        condition: service_healthy
    networks:
      jan_community:
        ipv4_address: 172.20.0.9
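  # A minimal sketch of a .env file that overrides the defaults referenced above
  # (variable names are taken from the ${...} references in this service; the values
  # shown are just the fallback defaults, not project-specific settings):
  #   KEYCLOAK_VERSION=22.0.0
  #   POSTGRES_DB_NAME=keycloak
  #   POSTGRES_USERNAME=postgres
  #   POSTGRES_PASSWORD=postgres
  #   KEYCLOAK_ADMIN=admin
  #   KEYCLOAK_ADMIN_PASSWORD=admin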
  keycloak_postgres:
    image: postgres:13
    # Pass both settings to a single postgres process ("&&" would be handed to postgres
    # as a literal argument rather than chaining commands).
    command: postgres -c 'max_connections=200' -c 'shared_buffers=24MB'
    environment:
      # Environment variables specifically for Postgres
      POSTGRES_DB: ${POSTGRES_DB_NAME:-keycloak}
      POSTGRES_USER: ${POSTGRES_USERNAME:-postgres}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      PGDATA: /data/postgres
      PGPORT: ${POSTGRES_PORT:-5432}
    healthcheck:
      # Placeholder check that always reports healthy; see the commented pg_isready variant below.
      test: "exit 0"
    # volumes:
    #   - keycloak_postgres_data:/data/postgres
    ports:
      - ${POSTGRES_PORT:-5432}:${POSTGRES_PORT:-5432}
    networks:
      jan_community:
        ipv4_address: 172.20.0.10
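  # A stricter healthcheck sketch (not part of the original file): block dependents until
  # Postgres actually accepts connections. pg_isready ships with the postgres:13 image, and
  # "$$" defers expansion to the container, where POSTGRES_USER and POSTGRES_DB are set above.
  # healthcheck:
  #   test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
  #   interval: 5s
  #   timeout: 5s
  #   retries: 10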
  postgres:
    image: postgres:13
    restart: always
    # volumes:
    #   - db_data:/var/lib/postgresql/data
    env_file:
      - conf/sample.env_app-backend-postgres
    networks:
      jan_community:
        ipv4_address: 172.20.0.11
  graphql-engine:
    image: hasura/graphql-engine:v2.31.0.cli-migrations-v3
    ports:
      - "8080:8080"
    restart: always
    env_file:
      - conf/sample.env_app-backend
    volumes:
      - ./app-backend/hasura/migrations:/migrations
      - ./app-backend/hasura/metadata:/metadata
    depends_on:
      data-connector-agent:
        condition: service_healthy
    networks:
      jan_community:
        ipv4_address: 172.20.0.12
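  # conf/sample.env_app-backend is not reproduced here; a typical Hasura configuration
  # (an illustrative assumption, not the project's actual values) would define at least:
  #   HASURA_GRAPHQL_DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres
  #   HASURA_GRAPHQL_ENABLE_CONSOLE=true
  #   HASURA_GRAPHQL_ADMIN_SECRET=changeme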
  worker:
    build:
      context: ./app-backend
      dockerfile: ./worker/Dockerfile
    restart: always
    environment:
      - "NODE_ENV=development"
    volumes:
      - .:/worker
    ports:
      - "8787:8787"
    networks:
      jan_community:
        ipv4_address: 172.20.0.13
  data-connector-agent:
    image: hasura/graphql-data-connector:v2.31.0
    restart: always
    ports:
      - "8081:8081"
    environment:
      QUARKUS_LOG_LEVEL: ERROR # FATAL, ERROR, WARN, INFO, DEBUG, TRACE
      ## https://quarkus.io/guides/opentelemetry#configuration-reference
      QUARKUS_OPENTELEMETRY_ENABLED: "false"
      ## QUARKUS_OPENTELEMETRY_TRACER_EXPORTER_OTLP_ENDPOINT: http://jaeger:4317
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8081/api/v1/athena/health"]
      interval: 5s
      timeout: 10s
      retries: 5
      start_period: 5s

    networks:
      jan_community:
        ipv4_address: 172.20.0.14
  web:
    build:
      context: ./web-client
      dockerfile: ./dev.Dockerfile
    container_name: jan_web
    restart: always
    env_file:
      - conf/sample.env_web-client
    ports:
      - "3000:3000"
    environment:
      NODE_ENV: development

    networks:
      jan_community:
        ipv4_address: 172.20.0.15
  # Service to download a model file.
  downloader:
    image: busybox
    # Extract the model filename from MODEL_URL and download it only if it is not already present.
    # "$$" escapes the dollar sign so expansion happens in the container shell; MODEL_URL itself
    # is interpolated by docker compose from the host environment.
    command: /bin/sh -c "LLM_MODEL_FILE=$$(basename ${MODEL_URL}); if [ ! -f /models/$$LLM_MODEL_FILE ]; then wget -O /models/$$LLM_MODEL_FILE ${MODEL_URL}; fi"
    # Mount a local directory to store the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models

    networks:
      jan_community:
        ipv4_address: 172.20.0.16
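  # MODEL_URL is expected in the host environment (or a .env file). A hypothetical example,
  # matching the model filename the llm service below expects:
  #   MODEL_URL=https://example.com/models/llama-2-7b-chat.ggmlv3.q4_1.bin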
  # Service to wait for the downloader service to finish downloading the model.
  wait-for-downloader:
    image: busybox
    # Wait until the model file (derived from MODEL_URL) exists; "$$" again defers expansion
    # to the container shell.
    command: /bin/sh -c "LLM_MODEL_FILE=$$(basename ${MODEL_URL}); echo 'Waiting for downloader to finish'; while [ ! -f /models/$$LLM_MODEL_FILE ]; do sleep 1; done; echo 'Model downloaded!'"
    # Start this service after the downloader service has started.
    depends_on:
      downloader:
        condition: service_started
    # Mount the same local directory to check for the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models

    networks:
      jan_community:
        ipv4_address: 172.20.0.17
  # Service to run the Llama web application.
  llm:
    image: ghcr.io/abetlen/llama-cpp-python:latest
    # Mount the directory that contains the downloaded model.
    volumes:
      - ./jan-inference/llm/models:/models
    ports:
      - "8000:8000"
    environment:
      # Path to the model file inside the container for the web application.
      MODEL: /models/llama-2-7b-chat.ggmlv3.q4_1.bin
      PYTHONUNBUFFERED: 1
    # Health check configuration
    # healthcheck:
    #   test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8000"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3
    #   start_period: 30s
    # Restart policy configuration
    restart: on-failure
    # Start this service only after wait-for-downloader has completed successfully.
    depends_on:
      wait-for-downloader:
        condition: service_completed_successfully
    # Connect this service to the shared jan_community network.

    networks:
      jan_community:
        ipv4_address: 172.20.0.18
networks:
  jan_community:
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.20.0.0/16
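# Typical usage, assuming MODEL_URL is exported (or set in .env) and the conf/ samples exist:
#   docker compose up -d
#   docker compose logs -f llm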