# Description

I tried to use Quivr with the docker-compose-ollama.yml option, but the Docker Compose build crashed with the following message:

`{"code":"58P01","details":null,"hint":null,"message":"could not access file \"$libdir/wrappers-0.1.19\": No such file or directory"}`

After a quick search I found that the issue was related to the supabase/postgres image. Comparing against the default docker-compose.yml, I saw that it uses a newer version of the supabase/postgres image; when I tried that newer version with docker-compose-ollama.yml, the errors stopped and the app works fine.

Related issues: #1846 #1822 #1850
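The fix comes down to bumping the Postgres image tag in docker-compose-ollama.yml so it matches the default compose file. A minimal sketch of the relevant change (only the `image:` tag of the `db` service moves; the previously pinned tag is not shown here):

```yaml
# docker-compose-ollama.yml — db service
db:
  container_name: supabase-db
  # newer tag, matching the default docker-compose.yml; resolves the
  # "$libdir/wrappers-0.1.19" error from the older supabase/postgres image
  image: supabase/postgres:15.1.0.136
```

The full updated docker-compose-ollama.yml follows.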
version: "3.8"
|
|
|
|
services:
|
|
frontend:
|
|
pull_policy: never
|
|
build:
|
|
context: frontend
|
|
dockerfile: Dockerfile
|
|
args:
|
|
- NEXT_PUBLIC_ENV=local
|
|
- NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL}
|
|
- NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL}
|
|
- NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY}
|
|
- NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL}
|
|
- NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL}
|
|
container_name: web
|
|
depends_on:
|
|
- backend-core
|
|
restart: always
|
|
ports:
|
|
- 3000:3000
|
|
|
|
|
|
backend-core:
|
|
image: backend-base
|
|
pull_policy: never
|
|
env_file:
|
|
- .env
|
|
build:
|
|
context: backend
|
|
dockerfile: Dockerfile
|
|
container_name: backend-core
|
|
command:
|
|
- "uvicorn"
|
|
- "main:app"
|
|
- "--host"
|
|
- "0.0.0.0"
|
|
- "--port"
|
|
- "5050"
|
|
- "--workers"
|
|
- "1"
|
|
restart: always
|
|
depends_on:
|
|
db:
|
|
condition: service_healthy
|
|
kong:
|
|
condition: service_healthy
|
|
ports:
|
|
- 5050:5050
|
|
|
|
redis:
|
|
image: redis:latest
|
|
container_name: redis
|
|
restart: always
|
|
ports:
|
|
- 6379:6379
|
|
|
|
worker:
|
|
pull_policy: never
|
|
image: backend-base
|
|
env_file:
|
|
- .env
|
|
build:
|
|
context: backend
|
|
dockerfile: Dockerfile
|
|
container_name: worker
|
|
command: celery -A celery_worker worker -l info
|
|
restart: always
|
|
depends_on:
|
|
- redis
|
|
|
|
beat:
|
|
image: backend-base
|
|
pull_policy: never
|
|
env_file:
|
|
- .env
|
|
build:
|
|
context: backend
|
|
dockerfile: Dockerfile
|
|
container_name: beat
|
|
command: celery -A celery_worker beat -l info
|
|
restart: always
|
|
depends_on:
|
|
- redis
|
|
|
|
flower:
|
|
image: backend-base
|
|
pull_policy: never
|
|
env_file:
|
|
- .env
|
|
build:
|
|
context: backend
|
|
dockerfile: Dockerfile
|
|
container_name: flower
|
|
command: celery -A celery_worker flower -l info --port=5555
|
|
restart: always
|
|
depends_on:
|
|
- redis
|
|
- worker
|
|
- beat
|
|
ports:
|
|
- 5555:5555
|
|
|
|
studio:
|
|
container_name: supabase-studio
|
|
image: supabase/studio:20231123-64a766a
|
|
restart: unless-stopped
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD",
|
|
"node",
|
|
"-e",
|
|
"require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
|
|
]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
depends_on:
|
|
analytics:
|
|
condition: service_healthy
|
|
environment:
|
|
STUDIO_PG_META_URL: http://meta:8080
|
|
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
|
|
|
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
|
|
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
|
|
|
|
SUPABASE_URL: http://kong:8000
|
|
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
|
|
SUPABASE_ANON_KEY: ${ANON_KEY}
|
|
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
|
|
|
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
|
LOGFLARE_URL: http://analytics:4000
|
|
NEXT_PUBLIC_ENABLE_LOGS: true
|
|
# Comment to use Big Query backend for analytics
|
|
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
|
|
# Uncomment to use Big Query backend for analytics
|
|
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
|
|
|
|
kong:
|
|
container_name: supabase-kong
|
|
image: kong:2.8.1
|
|
restart: unless-stopped
|
|
# https://unix.stackexchange.com/a/294837
|
|
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
|
|
ports:
|
|
- ${KONG_HTTP_PORT}:8000/tcp
|
|
- ${KONG_HTTPS_PORT}:8443/tcp
|
|
depends_on:
|
|
analytics:
|
|
condition: service_healthy
|
|
environment:
|
|
KONG_DATABASE: "off"
|
|
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
|
|
# https://github.com/supabase/cli/issues/14
|
|
KONG_DNS_ORDER: LAST,A,CNAME
|
|
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
|
|
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
|
|
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
|
|
SUPABASE_ANON_KEY: ${ANON_KEY}
|
|
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
|
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
|
|
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
|
|
volumes:
|
|
# https://github.com/supabase/supabase/issues/12661
|
|
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro
|
|
|
|
auth:
|
|
container_name: supabase-auth
|
|
image: supabase/gotrue:v2.99.0
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
analytics:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD",
|
|
"wget",
|
|
"--no-verbose",
|
|
"--tries=1",
|
|
"--spider",
|
|
"http://localhost:9999/health"
|
|
]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
environment:
|
|
GOTRUE_API_HOST: 0.0.0.0
|
|
GOTRUE_API_PORT: 9999
|
|
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
|
|
|
|
GOTRUE_DB_DRIVER: postgres
|
|
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
|
|
|
GOTRUE_SITE_URL: ${SITE_URL}
|
|
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
|
|
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
|
|
|
|
GOTRUE_JWT_ADMIN_ROLES: service_role
|
|
GOTRUE_JWT_AUD: authenticated
|
|
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
|
|
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
|
|
GOTRUE_JWT_SECRET: ${JWT_SECRET}
|
|
|
|
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
|
|
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
|
|
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
|
|
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
|
|
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
|
|
GOTRUE_SMTP_HOST: ${SMTP_HOST}
|
|
GOTRUE_SMTP_PORT: ${SMTP_PORT}
|
|
GOTRUE_SMTP_USER: ${SMTP_USER}
|
|
GOTRUE_SMTP_PASS: ${SMTP_PASS}
|
|
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
|
|
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
|
|
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
|
|
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
|
|
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
|
|
|
|
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
|
|
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
|
|
|
|
rest:
|
|
container_name: supabase-rest
|
|
image: postgrest/postgrest:v11.2.2
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
analytics:
|
|
condition: service_healthy
|
|
restart: unless-stopped
|
|
environment:
|
|
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
|
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
|
|
PGRST_DB_ANON_ROLE: anon
|
|
PGRST_JWT_SECRET: ${JWT_SECRET}
|
|
PGRST_DB_USE_LEGACY_GUCS: "false"
|
|
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
|
|
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
|
|
command: "postgrest"
|
|
|
|
realtime:
|
|
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
|
container_name: realtime-dev.supabase-realtime
|
|
image: supabase/realtime:v2.25.35
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
analytics:
|
|
condition: service_healthy
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD",
|
|
"bash",
|
|
"-c",
|
|
"printf \\0 > /dev/tcp/localhost/4000"
|
|
]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
environment:
|
|
PORT: 4000
|
|
DB_HOST: ${POSTGRES_HOST}
|
|
DB_PORT: ${POSTGRES_PORT}
|
|
DB_USER: supabase_admin
|
|
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
|
DB_NAME: ${POSTGRES_DB}
|
|
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
|
|
DB_ENC_KEY: supabaserealtime
|
|
API_JWT_SECRET: ${JWT_SECRET}
|
|
FLY_ALLOC_ID: fly123
|
|
FLY_APP_NAME: realtime
|
|
SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
|
ERL_AFLAGS: -proto_dist inet_tcp
|
|
ENABLE_TAILSCALE: "false"
|
|
DNS_NODES: "''"
|
|
command: >
|
|
sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"
|
|
|
|
storage:
|
|
container_name: supabase-storage
|
|
image: supabase/storage-api:v0.43.11
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
rest:
|
|
condition: service_started
|
|
imgproxy:
|
|
condition: service_started
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD",
|
|
"wget",
|
|
"--no-verbose",
|
|
"--tries=1",
|
|
"--spider",
|
|
"http://localhost:5000/status"
|
|
]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
environment:
|
|
ANON_KEY: ${ANON_KEY}
|
|
SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
|
POSTGREST_URL: http://rest:3000
|
|
PGRST_JWT_SECRET: ${JWT_SECRET}
|
|
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
|
FILE_SIZE_LIMIT: 52428800
|
|
STORAGE_BACKEND: file
|
|
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
|
|
TENANT_ID: stub
|
|
# TODO: https://github.com/supabase/storage-api/issues/55
|
|
REGION: stub
|
|
GLOBAL_S3_BUCKET: stub
|
|
ENABLE_IMAGE_TRANSFORMATION: "true"
|
|
IMGPROXY_URL: http://imgproxy:5001
|
|
volumes:
|
|
- ./volumes/storage:/var/lib/storage:z
|
|
|
|
imgproxy:
|
|
container_name: supabase-imgproxy
|
|
image: darthsim/imgproxy:v3.8.0
|
|
healthcheck:
|
|
test: [ "CMD", "imgproxy", "health" ]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
environment:
|
|
IMGPROXY_BIND: ":5001"
|
|
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
|
|
IMGPROXY_USE_ETAG: "true"
|
|
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
|
|
volumes:
|
|
- ./volumes/storage:/var/lib/storage:z
|
|
|
|
meta:
|
|
container_name: supabase-meta
|
|
image: supabase/postgres-meta:v0.68.0
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
analytics:
|
|
condition: service_healthy
|
|
restart: unless-stopped
|
|
environment:
|
|
PG_META_PORT: 8080
|
|
PG_META_DB_HOST: ${POSTGRES_HOST}
|
|
PG_META_DB_PORT: ${POSTGRES_PORT}
|
|
PG_META_DB_NAME: ${POSTGRES_DB}
|
|
PG_META_DB_USER: supabase_admin
|
|
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
|
|
|
functions:
|
|
container_name: supabase-edge-functions
|
|
image: supabase/edge-runtime:v1.22.4
|
|
restart: unless-stopped
|
|
depends_on:
|
|
analytics:
|
|
condition: service_healthy
|
|
environment:
|
|
JWT_SECRET: ${JWT_SECRET}
|
|
SUPABASE_URL: http://kong:8000
|
|
SUPABASE_ANON_KEY: ${ANON_KEY}
|
|
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
|
|
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
|
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
|
|
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
|
|
volumes:
|
|
- ./volumes/functions:/home/deno/functions:Z
|
|
command:
|
|
- start
|
|
- --main-service
|
|
- /home/deno/functions/main
|
|
|
|
analytics:
|
|
container_name: supabase-analytics
|
|
image: supabase/logflare:1.4.0
|
|
healthcheck:
|
|
test: [ "CMD", "curl", "http://localhost:4000/health" ]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 10
|
|
restart: unless-stopped
|
|
depends_on:
|
|
db:
|
|
# Disable this if you are using an external Postgres database
|
|
condition: service_healthy
|
|
# Uncomment to use Big Query backend for analytics
|
|
# volumes:
|
|
# - type: bind
|
|
# source: ${PWD}/gcloud.json
|
|
# target: /opt/app/rel/logflare/bin/gcloud.json
|
|
# read_only: true
|
|
environment:
|
|
LOGFLARE_NODE_HOST: 127.0.0.1
|
|
DB_USERNAME: supabase_admin
|
|
DB_DATABASE: ${POSTGRES_DB}
|
|
DB_HOSTNAME: ${POSTGRES_HOST}
|
|
DB_PORT: ${POSTGRES_PORT}
|
|
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
|
DB_SCHEMA: _analytics
|
|
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
|
LOGFLARE_SINGLE_TENANT: true
|
|
LOGFLARE_SUPABASE_MODE: true
|
|
LOGFLARE_MIN_CLUSTER_SIZE: 1
|
|
RELEASE_COOKIE: cookie
|
|
|
|
# Comment variables to use Big Query backend for analytics
|
|
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
|
POSTGRES_BACKEND_SCHEMA: _analytics
|
|
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
|
|
|
|
# Uncomment to use Big Query backend for analytics
|
|
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
|
|
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
|
|
ports:
|
|
- 4000:4000
|
|
entrypoint: |
|
|
sh -c `cat <<'EOF' > run.sh && sh run.sh
|
|
./logflare eval Logflare.Release.migrate
|
|
./logflare start --sname logflare
|
|
EOF
|
|
`
|
|
|
|
# Comment out everything below this point if you are using an external Postgres database
|
|
db:
|
|
container_name: supabase-db
|
|
image: supabase/postgres:15.1.0.136
|
|
healthcheck:
|
|
test: pg_isready -U postgres -h localhost
|
|
interval: 5s
|
|
timeout: 5s
|
|
retries: 10
|
|
depends_on:
|
|
vector:
|
|
condition: service_healthy
|
|
command:
|
|
- postgres
|
|
- -c
|
|
- config_file=/etc/postgresql/postgresql.conf
|
|
- -c
|
|
- log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
|
|
restart: unless-stopped
|
|
ports:
|
|
# Pass down internal port because it's set dynamically by other services
|
|
- ${POSTGRES_PORT}:${POSTGRES_PORT}
|
|
environment:
|
|
POSTGRES_HOST: /var/run/postgresql
|
|
PGPORT: ${POSTGRES_PORT}
|
|
POSTGRES_PORT: ${POSTGRES_PORT}
|
|
PGPASSWORD: ${POSTGRES_PASSWORD}
|
|
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
|
PGDATABASE: ${POSTGRES_DB}
|
|
POSTGRES_DB: ${POSTGRES_DB}
|
|
JWT_SECRET: ${JWT_SECRET}
|
|
JWT_EXP: ${JWT_EXPIRY}
|
|
volumes:
|
|
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
|
# Must be superuser to create event trigger
|
|
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
|
# Must be superuser to alter reserved role
|
|
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
|
# Initialize the database settings with JWT_SECRET and JWT_EXP
|
|
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
|
|
# PGDATA directory is persisted between restarts
|
|
- ./volumes/db/data:/var/lib/postgresql/data:Z
|
|
# Changes required for Analytics support
|
|
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
|
|
- ./scripts/tables-ollama.sql:/docker-entrypoint-initdb.d/seed.sql
|
|
|
|
vector:
|
|
container_name: supabase-vector
|
|
image: timberio/vector:0.28.1-alpine
|
|
healthcheck:
|
|
test:
|
|
[
|
|
"CMD",
|
|
"wget",
|
|
"--no-verbose",
|
|
"--tries=1",
|
|
"--spider",
|
|
"http://vector:9001/health"
|
|
]
|
|
timeout: 5s
|
|
interval: 5s
|
|
retries: 3
|
|
volumes:
|
|
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
|
|
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
|
|
|
|
command: [ "--config", "etc/vector/vector.yml" ] |