diff --git a/.backend_env.example b/.backend_env.example deleted file mode 100644 index 02dfeca44..000000000 --- a/.backend_env.example +++ /dev/null @@ -1,29 +0,0 @@ -SUPABASE_URL= -SUPABASE_SERVICE_KEY= -PG_DATABASE_URL=notimplementedyet -OPENAI_API_KEY= -ANTHROPIC_API_KEY=null -JWT_SECRET_KEY= -AUTHENTICATE=true -GOOGLE_APPLICATION_CREDENTIALS= -GOOGLE_CLOUD_PROJECT= -CELERY_BROKER_URL=redis://redis:6379/0 -CELEBRY_BROKER_QUEUE_NAME=quivr-preview.fifo - -#LOCAL -#OLLAMA_API_BASE_URL=http://host.docker.internal:11434 # local all in one remove comment to use local llm with Ollama - - - -#RESEND -RESEND_API_KEY= -RESEND_EMAIL_ADDRESS=onboarding@resend.dev -RESEND_CONTACT_SALES_FROM=contact_sales@resend.dev -RESEND_CONTACT_SALES_TO= - -CRAWL_DEPTH=1 - - -PREMIUM_MAX_BRAIN_NUMBER=30 -PREMIUM_MAX_BRAIN_SIZE=10000000 -PREMIUM_DAILY_CHAT_CREDIT=100 diff --git a/.env.example b/.env.example new file mode 100644 index 000000000..1f2cd8191 --- /dev/null +++ b/.env.example @@ -0,0 +1,163 @@ +#### QUIVR Configuration + + +OPENAI_API_KEY=CHANGE_ME + + + + +# This file is used to configure the Quivr stack. It is used by the `docker-compose.yml` file to configure the stack. + +######## +# FRONTEND +######## + +NEXT_PUBLIC_ENV=local +NEXT_PUBLIC_BACKEND_URL=http://localhost:5050 +NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000 +NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +NEXT_PUBLIC_CMS_URL=https://cms.quivr.app +NEXT_PUBLIC_FRONTEND_URL=http://localhost:* + + + +######## +# BACKEND +######## + +SUPABASE_URL=http://kong:8000 +SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +PG_DATABASE_URL=notimplementedyet +ANTHROPIC_API_KEY=null +JWT_SECRET_KEY=your-super-secret-jwt-token-with-at-least-32-characters-long +AUTHENTICATE=true +GOOGLE_APPLICATION_CREDENTIALS= +GOOGLE_CLOUD_PROJECT= +CELERY_BROKER_URL=redis://redis:6379/0 +CELEBRY_BROKER_QUEUE_NAME=quivr-preview.fifo + +#LOCAL +#OLLAMA_API_BASE_URL=http://host.docker.internal:11434 # local all in one remove comment to use local llm with Ollama + + +#RESEND +RESEND_API_KEY= +RESEND_EMAIL_ADDRESS=onboarding@resend.dev +RESEND_CONTACT_SALES_FROM=contact_sales@resend.dev +RESEND_CONTACT_SALES_TO= + +CRAWL_DEPTH=1 + + +PREMIUM_MAX_BRAIN_NUMBER=30 +PREMIUM_MAX_BRAIN_SIZE=10000000 +PREMIUM_DAILY_CHAT_CREDIT=100 + + + +###### SUPABASE CONFIGURATION ###### + +############ +# Secrets +# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION +############ + +POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password +JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long +ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE +SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q +DASHBOARD_USERNAME=admin +DASHBOARD_PASSWORD=admin + +############ +# Database - You can change these to any PostgreSQL database that has 
logical replication enabled. +############ + +POSTGRES_HOST=db +POSTGRES_DB=postgres +POSTGRES_PORT=5432 +# default user is postgres + +############ +# API Proxy - Configuration for the Kong Reverse proxy. +############ + +KONG_HTTP_PORT=8000 +KONG_HTTPS_PORT=8443 + + +############ +# API - Configuration for PostgREST. +############ + +PGRST_DB_SCHEMAS=public,storage,graphql_public + + +############ +# Auth - Configuration for the GoTrue authentication server. +############ + +## General +SITE_URL=http://localhost:3001 +ADDITIONAL_REDIRECT_URLS= +JWT_EXPIRY=3600 +DISABLE_SIGNUP=false +API_EXTERNAL_URL=http://localhost:8000 + +## Mailer Config +MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify" +MAILER_URLPATHS_INVITE="/auth/v1/verify" +MAILER_URLPATHS_RECOVERY="/auth/v1/verify" +MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify" + +## Email auth +ENABLE_EMAIL_SIGNUP=true +ENABLE_EMAIL_AUTOCONFIRM=false +SMTP_ADMIN_EMAIL=admin@example.com +SMTP_HOST=supabase-mail +SMTP_PORT=2500 +SMTP_USER=fake_mail_user +SMTP_PASS=fake_mail_password +SMTP_SENDER_NAME=fake_sender + +## Phone auth +ENABLE_PHONE_SIGNUP=true +ENABLE_PHONE_AUTOCONFIRM=true + + +############ +# Studio - Configuration for the Dashboard +############ + +STUDIO_DEFAULT_ORGANIZATION=Default Organization +STUDIO_DEFAULT_PROJECT=Default Project + +STUDIO_PORT=3001 +# replace if you intend to use Studio outside of localhost +SUPABASE_PUBLIC_URL=http://localhost:8000 + +# Enable webp support +IMGPROXY_ENABLE_WEBP_DETECTION=true + +############ +# Functions - Configuration for Functions +############ +# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet. +FUNCTIONS_VERIFY_JWT=false + +############ +# Logs - Configuration for Logflare +# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction +############ + +LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key + +# Change vector.toml sinks to reflect this change +LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key + +# Docker socket location - this value will differ depending on your OS +DOCKER_SOCKET_LOCATION=/var/run/docker.sock + +# Google Cloud Project details +GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID +GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER \ No newline at end of file diff --git a/.frontend_env.example b/.frontend_env.example deleted file mode 100644 index d7c4da11c..000000000 --- a/.frontend_env.example +++ /dev/null @@ -1,23 +0,0 @@ -NEXT_PUBLIC_ENV=local -NEXT_PUBLIC_BACKEND_URL=http://localhost:5050 -NEXT_PUBLIC_FRONTEND_URL=http://localhost:* -NEXT_PUBLIC_SUPABASE_URL= -NEXT_PUBLIC_SUPABASE_ANON_KEY= - -NEXT_PUBLIC_GROWTHBOOK_CLIENT_KEY= -NEXT_PUBLIC_GROWTHBOOK_URL= - -NEXT_PUBLIC_JUNE_API_KEY= -NEXT_PUBLIC_GA_ID= - -NEXT_PUBLIC_E2E_URL=http://localhost:3003 -NEXT_PUBLIC_E2E_EMAIL= -NEXT_PUBLIC_E2E_PASSWORD= - -NEXT_PUBLIC_CMS_URL=https://cms.quivr.app - -NEXT_PUBLIC_STRIPE_PRICING_TABLE_ID= -NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY= -NEXT_PUBLIC_STRIPE_MANAGE_PLAN_URL= - -NEXT_PUBLIC_AUTH_MODES=magic_link,password,google_sso \ No newline at end of file diff --git a/.gitignore b/.gitignore index 03a102736..66787ddfe 100644 --- a/.gitignore +++ b/.gitignore @@ -59,7 +59,6 @@ backend/core/application_default_credentials.json backend/core/local_models/* ## scripts -.migration_info package-lock.json backend/celerybeat-schedule frontend/public/robots.txt @@ -71,3 +70,5 @@ backend/lib/* backend/pyvenv.cfg backend/share/* backend/slim.report.json +volumes/db/data/ +volumes/storage/stub/stub/quivr/* diff 
--git a/.migration_info b/.migration_info
new file mode 100644
index 000000000..67c1535bc
--- /dev/null
+++ b/.migration_info
@@ -0,0 +1,5 @@
+DB_HOST=localhost
+DB_NAME=postgres
+DB_PORT=5432
+DB_USER=postgres
+DB_PASSWORD=your-super-secret-and-long-postgres-password
diff --git a/README.md b/README.md
index 379abb93d..f5c47ee91 100644
--- a/README.md
+++ b/README.md
@@ -43,16 +43,9 @@ Ensure you have the following installed:
 - Docker
 - Docker Compose

-Additionally, you'll need a [Supabase](https://supabase.com/) account for:
-
-- Creating a new Supabase project
-- Supabase Project API key
-- Supabase Project URL

 ### Installation Steps 💽

-- **Step 0**: If needed, the installation is explained on Youtube [here](https://youtu.be/rC-s4QdfY80)
-
 - **Step 1**: Clone the repository using **one** of these commands:

   - If you don't have an SSH key set up:
@@ -67,46 +60,41 @@ Additionally, you'll need a [Supabase](https://supabase.com/) account for:
     git clone git@github.com:StanGirard/Quivr.git && cd Quivr
     ```

-- **Step 2**: Use the install helper script to automate subsequent steps.
-  You can use the install_helper.sh script to setup your env files and execute the migrations.
-
-  prerequisites:
+- **Step 2**: Install prerequisites:

   ```bash
   brew install gum # Windows (via Scoop) scoop install charm-gum
   brew install postgresql # Windows (via Scoop) scoop install postgresql
   ```

-  ```bash
-  chmod +x install_helper.sh
-  ./install_helper.sh
-  ```
-  If you want to manually set up the environment, follow the steps below, otherwise skip to Step 6.

-- **Step 2 - Bis**: Copy the `.XXXXX_env` files
+- **Step 3**: Copy the `.env.example` file

   ```bash
-  cp .backend_env.example backend/.env
-  cp .frontend_env.example frontend/.env
+  cp .env.example .env
   ```

-- **Step 3**: Update the `backend/.env` and `frontend/.env` file
+- **Step 4**: Update the `.env` file

-  > _Your `supabase_service_key` can be found in your Supabase dashboard under Project Settings -> API. Use the `anon` `public` key found in the `Project API keys` section._
+Update **OPENAI_API_KEY** in the `.env` file; it is the only variable you need to change.

-  > _Your `JWT_SECRET_KEY` can be found in your supabase settings under Project Settings -> API -> JWT Settings -> JWT Secret_
+You can get your API key [here](https://platform.openai.com/api-keys). You will need to create an account first and add your credit card information. Don't worry, you won't be charged unless you use the API. You can find more information about pricing [here](https://openai.com/pricing/).

-  > _The `NEXT_PUBLIC_BACKEND_URL` is set to localhost:5050 for the docker. Update it if you are running the backend on a different machine._
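+> _Optional sanity check (not an official step): you can verify your key before launching the stack. This assumes `curl` is installed and that `OPENAI_API_KEY` is exported in your shell:_
+
+```bash
+# A JSON response listing models means the key is valid; an authentication error means it is not.
+curl -s https://api.openai.com/v1/models \
+  -H "Authorization: Bearer $OPENAI_API_KEY" | head
+```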
+- **Step 5**: Launch the database

-  - Change variables in `backend/.env`
-  - Change variables in `frontend/.env`
+  ```bash
+  docker compose pull
+
+  docker compose up db -d
+  ```

-- **Step 4**: Use the `migration.sh` script to run the migration scripts
+- **Step 6**: Use the `migration.sh` script to run the migration scripts

   ```bash
   chmod +x migration.sh
   ./migration.sh
+  # Select 1) Create all tables
   ```

-  Choose either `Create all tables` if it's your first time or `Run migrations`
+  Choose `Create all tables` if this is your first installation, or `Run migrations` if you are upgrading an existing one.
@@ -117,23 +105,20 @@ Additionally, you'll need a [Supabase](https://supabase.com/) account for:

   All the scripts can be found in the [scripts](scripts/) folder

-  > _If you come from an old version of Quivr, run the scripts in [migration script](scripts/) to migrate your data to the new version in the order of date_

-- **Step 5**: Launch the app
+- **Step 7**: Launch the app

   ```bash
   docker compose up --build
   ```

-- **Step 6**: Navigate to `localhost:3000` in your browser
+- **Step 8**: Navigate to `localhost:8000` in your browser

-  Once Quivr is running, you need to create an account to access the app. Go to supabase and create a new user in the `Users` section. You can then log in with your new user.
+Once Quivr is running, you need to create a user to access the app. Go to the Supabase dashboard at [http://localhost:8000/project/default/auth/users](http://localhost:8000/project/default/auth/users), sign in with the default dashboard credentials (username `admin`, password `admin`), and create a new user in the `Users` section.

-- **Step 7**: Want to contribute to the project?
+- **Step 9**: Log in to the app

-  ```
-  docker compose -f docker-compose.dev.yml up --build
-  ```
+You can now sign in to the app with your new user at [http://localhost:3000/login](http://localhost:3000/login).
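+> _If a service fails to come up, you can inspect the stack like this (a suggestion, not an official step; `backend-core` is the API service defined in `docker-compose.yml`):_
+
+```bash
+# Show the state and health of every container in the stack.
+docker compose ps
+
+# Follow the API logs if the frontend cannot reach the backend.
+docker compose logs -f backend-core
+```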
## Contributors ✨ diff --git a/docker-compose.local.yml b/docker-compose.local.yml deleted file mode 100644 index fa768f895..000000000 --- a/docker-compose.local.yml +++ /dev/null @@ -1,112 +0,0 @@ -version: "3" - -services: - traefik: - image: traefik:v2.7 - container_name: traefik - command: - - "--api.insecure=true" - - "--providers.docker=true" - - "--providers.docker.exposedbydefault=false" - - "--entrypoints.web.address=:80" - - "--entrypoints.websecure.address=:443" - - "--certificatesresolvers.myresolver.acme.tlschallenge=true" - - "--certificatesresolvers.myresolver.acme.email=${EMAIL}" - - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json" - ports: - - "80:80" - - "443:443" - volumes: - - "/var/run/docker.sock:/var/run/docker.sock:ro" - - "./letsencrypt:/letsencrypt" - restart: always - env_file: - - .env - - frontend: - env_file: - - ./frontend/.env - build: - context: frontend - dockerfile: Dockerfile - container_name: web - restart: always - labels: - - "traefik.enable=true" - - "traefik.http.routers.frontend.rule=Host(`${DOMAIN_NAME}`)" - - "traefik.http.routers.frontend.entrypoints=websecure" - - "traefik.http.routers.frontend.tls.certresolver=myresolver" - depends_on: - - traefik - - backend-core: - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: backend-core - restart: always - volumes: - - ./backend/:/code/ - depends_on: - - redis - - worker - - beat - - traefik - labels: - - "traefik.enable=true" - - "traefik.http.routers.backend-core.rule=Host(`${API_DOMAIN_NAME}`)" - - "traefik.http.routers.backend-core.entrypoints=websecure" - - "traefik.http.routers.backend-core.tls.certresolver=myresolver" - - "traefik.http.services.backend-core.loadbalancer.server.port=5050" - - redis: - image: redis:latest - container_name: redis - restart: always - - worker: - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: worker - command: celery -A celery_worker worker -l info - restart: always - depends_on: - - redis - - beat: - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: beat - command: celery -A celery_worker beat -l info - restart: always - depends_on: - - redis - - flower: - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: flower - command: celery -A celery_worker flower -l info --port=5555 - restart: always - depends_on: - - redis - - worker - - beat - labels: - - "traefik.enable=true" - - "traefik.http.routers.flower.rule=Host(`flower.${API_DOMAIN_NAME}`)" - - "traefik.http.routers.flower.entrypoints=websecure" - - "traefik.http.routers.flower.tls.certresolver=myresolver" - - "traefik.http.services.flower.loadbalancer.server.port=5555" - diff --git a/docker-compose.supabase.yml b/docker-compose.supabase.yml deleted file mode 100644 index ed1b83b34..000000000 --- a/docker-compose.supabase.yml +++ /dev/null @@ -1,483 +0,0 @@ -version: "3.8" - -services: - frontend: - env_file: - - ./frontend/.env - build: - context: frontend - dockerfile: Dockerfile - container_name: web - restart: always - ports: - - 3000:3000 - - backend-core: - image: backend-base - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: backend-core - restart: always - depends_on: - - redis - - worker - - beat - ports: - - 5050:5050 - volumes: - - ./backend/:/code/ - - redis: - image: redis:latest - container_name: redis - restart: always - 
ports: - - 6379:6379 - - worker: - image: backend-base - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: worker - command: celery -A celery_worker worker -l info - restart: always - depends_on: - - redis - - beat: - image: backend-base - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: beat - command: celery -A celery_worker beat -l info - restart: always - depends_on: - - redis - - flower: - image: backend-base - env_file: - - ./backend/.env - build: - context: backend - dockerfile: Dockerfile - container_name: flower - command: celery -A celery_worker flower -l info --port=5555 - restart: always - depends_on: - - redis - - worker - - beat - ports: - - 5555:5555 - - studio: - container_name: supabase-studio - image: supabase/studio:20231123-64a766a - restart: unless-stopped - healthcheck: - test: - [ - "CMD", - "node", - "-e", - "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})" - ] - timeout: 5s - interval: 5s - retries: 3 - depends_on: - analytics: - condition: service_healthy - environment: - STUDIO_PG_META_URL: http://meta:8080 - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - - DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} - DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} - - SUPABASE_URL: http://kong:8000 - SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} - SUPABASE_ANON_KEY: ${ANON_KEY} - SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} - - LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} - LOGFLARE_URL: http://analytics:4000 - NEXT_PUBLIC_ENABLE_LOGS: true - # Comment to use Big Query backend for analytics - NEXT_ANALYTICS_BACKEND_PROVIDER: postgres - # Uncomment to use Big Query backend for analytics - # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery - - kong: - container_name: supabase-kong - image: kong:2.8.1 - restart: unless-stopped - # https://unix.stackexchange.com/a/294837 - entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' - ports: - - ${KONG_HTTP_PORT}:8000/tcp - - ${KONG_HTTPS_PORT}:8443/tcp - depends_on: - analytics: - condition: service_healthy - environment: - KONG_DATABASE: "off" - KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml - # https://github.com/supabase/cli/issues/14 - KONG_DNS_ORDER: LAST,A,CNAME - KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth - KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k - KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k - SUPABASE_ANON_KEY: ${ANON_KEY} - SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} - DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} - DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} - volumes: - # https://github.com/supabase/supabase/issues/12661 - - ./volumes/api/kong.yml:/home/kong/temp.yml:ro - - auth: - container_name: supabase-auth - image: supabase/gotrue:v2.99.0 - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - analytics: - condition: service_healthy - healthcheck: - test: - [ - "CMD", - "wget", - "--no-verbose", - "--tries=1", - "--spider", - "http://localhost:9999/health" - ] - timeout: 5s - interval: 5s - retries: 3 - restart: unless-stopped - environment: - GOTRUE_API_HOST: 0.0.0.0 - GOTRUE_API_PORT: 9999 - API_EXTERNAL_URL: ${API_EXTERNAL_URL} - - GOTRUE_DB_DRIVER: postgres - GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} - - GOTRUE_SITE_URL: ${SITE_URL} - 
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} - GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} - - GOTRUE_JWT_ADMIN_ROLES: service_role - GOTRUE_JWT_AUD: authenticated - GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated - GOTRUE_JWT_EXP: ${JWT_EXPIRY} - GOTRUE_JWT_SECRET: ${JWT_SECRET} - - GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} - GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} - # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true - # GOTRUE_SMTP_MAX_FREQUENCY: 1s - GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} - GOTRUE_SMTP_HOST: ${SMTP_HOST} - GOTRUE_SMTP_PORT: ${SMTP_PORT} - GOTRUE_SMTP_USER: ${SMTP_USER} - GOTRUE_SMTP_PASS: ${SMTP_PASS} - GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} - GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} - GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} - GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} - GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} - - GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} - GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} - - rest: - container_name: supabase-rest - image: postgrest/postgrest:v11.2.2 - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - analytics: - condition: service_healthy - restart: unless-stopped - environment: - PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} - PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} - PGRST_DB_ANON_ROLE: anon - PGRST_JWT_SECRET: ${JWT_SECRET} - PGRST_DB_USE_LEGACY_GUCS: "false" - PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} - PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} - command: "postgrest" - - realtime: - # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain - container_name: realtime-dev.supabase-realtime - image: supabase/realtime:v2.25.35 - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - analytics: - condition: service_healthy - healthcheck: - test: - [ - "CMD", - "bash", - "-c", - "printf \\0 > /dev/tcp/localhost/4000" - ] - timeout: 5s - interval: 5s - retries: 3 - restart: unless-stopped - environment: - PORT: 4000 - DB_HOST: ${POSTGRES_HOST} - DB_PORT: ${POSTGRES_PORT} - DB_USER: supabase_admin - DB_PASSWORD: ${POSTGRES_PASSWORD} - DB_NAME: ${POSTGRES_DB} - DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' - DB_ENC_KEY: supabaserealtime - API_JWT_SECRET: ${JWT_SECRET} - FLY_ALLOC_ID: fly123 - FLY_APP_NAME: realtime - SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq - ERL_AFLAGS: -proto_dist inet_tcp - ENABLE_TAILSCALE: "false" - DNS_NODES: "''" - command: > - sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server" - - storage: - container_name: supabase-storage - image: supabase/storage-api:v0.43.11 - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - rest: - condition: service_started - imgproxy: - condition: service_started - healthcheck: - test: - [ - "CMD", - "wget", - "--no-verbose", - "--tries=1", - "--spider", - "http://localhost:5000/status" - ] - timeout: 5s - interval: 5s - retries: 3 - restart: unless-stopped - environment: - ANON_KEY: ${ANON_KEY} - SERVICE_KEY: ${SERVICE_ROLE_KEY} - POSTGREST_URL: http://rest:3000 - PGRST_JWT_SECRET: ${JWT_SECRET} - DATABASE_URL: 
postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} - FILE_SIZE_LIMIT: 52428800 - STORAGE_BACKEND: file - FILE_STORAGE_BACKEND_PATH: /var/lib/storage - TENANT_ID: stub - # TODO: https://github.com/supabase/storage-api/issues/55 - REGION: stub - GLOBAL_S3_BUCKET: stub - ENABLE_IMAGE_TRANSFORMATION: "true" - IMGPROXY_URL: http://imgproxy:5001 - volumes: - - ./volumes/storage:/var/lib/storage:z - - imgproxy: - container_name: supabase-imgproxy - image: darthsim/imgproxy:v3.8.0 - healthcheck: - test: [ "CMD", "imgproxy", "health" ] - timeout: 5s - interval: 5s - retries: 3 - environment: - IMGPROXY_BIND: ":5001" - IMGPROXY_LOCAL_FILESYSTEM_ROOT: / - IMGPROXY_USE_ETAG: "true" - IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} - volumes: - - ./volumes/storage:/var/lib/storage:z - - meta: - container_name: supabase-meta - image: supabase/postgres-meta:v0.68.0 - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - analytics: - condition: service_healthy - restart: unless-stopped - environment: - PG_META_PORT: 8080 - PG_META_DB_HOST: ${POSTGRES_HOST} - PG_META_DB_PORT: ${POSTGRES_PORT} - PG_META_DB_NAME: ${POSTGRES_DB} - PG_META_DB_USER: supabase_admin - PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} - - functions: - container_name: supabase-edge-functions - image: supabase/edge-runtime:v1.22.4 - restart: unless-stopped - depends_on: - analytics: - condition: service_healthy - environment: - JWT_SECRET: ${JWT_SECRET} - SUPABASE_URL: http://kong:8000 - SUPABASE_ANON_KEY: ${ANON_KEY} - SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} - SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} - # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 - VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" - volumes: - - ./volumes/functions:/home/deno/functions:Z - command: - - start - - --main-service - - /home/deno/functions/main - - analytics: - container_name: supabase-analytics - image: supabase/logflare:1.4.0 - healthcheck: - test: [ "CMD", "curl", "http://localhost:4000/health" ] - timeout: 5s - interval: 5s - retries: 10 - restart: unless-stopped - depends_on: - db: - # Disable this if you are using an external Postgres database - condition: service_healthy - # Uncomment to use Big Query backend for analytics - # volumes: - # - type: bind - # source: ${PWD}/gcloud.json - # target: /opt/app/rel/logflare/bin/gcloud.json - # read_only: true - environment: - LOGFLARE_NODE_HOST: 127.0.0.1 - DB_USERNAME: supabase_admin - DB_DATABASE: ${POSTGRES_DB} - DB_HOSTNAME: ${POSTGRES_HOST} - DB_PORT: ${POSTGRES_PORT} - DB_PASSWORD: ${POSTGRES_PASSWORD} - DB_SCHEMA: _analytics - LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} - LOGFLARE_SINGLE_TENANT: true - LOGFLARE_SUPABASE_MODE: true - LOGFLARE_MIN_CLUSTER_SIZE: 1 - RELEASE_COOKIE: cookie - - # Comment variables to use Big Query backend for analytics - POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} - POSTGRES_BACKEND_SCHEMA: _analytics - LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true - - # Uncomment to use Big Query backend for analytics - # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} - # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} - ports: - - 4000:4000 - entrypoint: | - sh -c `cat <<'EOF' > run.sh && sh run.sh - ./logflare eval Logflare.Release.migrate - ./logflare start --sname logflare - EOF - ` - - # Comment out everything below this point if you are using an external Postgres database - db: - container_name: supabase-db - image: supabase/postgres:15.1.0.117 - healthcheck: - test: pg_isready -U postgres -h localhost - interval: 5s - timeout: 5s - retries: 10 - depends_on: - vector: - condition: service_healthy - command: - - postgres - - -c - - config_file=/etc/postgresql/postgresql.conf - - -c - - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs - restart: unless-stopped - ports: - # Pass down internal port because it's set dynamically by other services - - ${POSTGRES_PORT}:${POSTGRES_PORT} - environment: - POSTGRES_HOST: /var/run/postgresql - PGPORT: ${POSTGRES_PORT} - POSTGRES_PORT: ${POSTGRES_PORT} - PGPASSWORD: ${POSTGRES_PASSWORD} - POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} - PGDATABASE: ${POSTGRES_DB} - POSTGRES_DB: ${POSTGRES_DB} - JWT_SECRET: ${JWT_SECRET} - JWT_EXP: ${JWT_EXPIRY} - volumes: - - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z - # Must be superuser to create event trigger - - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z - # Must be superuser to alter reserved role - - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z - # Initialize the database settings with JWT_SECRET and JWT_EXP - - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z - # PGDATA directory is persisted between restarts - - ./volumes/db/data:/var/lib/postgresql/data:Z - # Changes required for Analytics support - - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z - - vector: - container_name: supabase-vector - image: timberio/vector:0.28.1-alpine - healthcheck: - test: - [ - "CMD", - "wget", - "--no-verbose", 
- "--tries=1", - "--spider", - "http://vector:9001/health" - ] - timeout: 5s - interval: 5s - retries: 3 - volumes: - - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro - - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro - - command: [ "--config", "etc/vector/vector.yml" ] \ No newline at end of file diff --git a/docker-compose.yml b/docker-compose.yml index 3c2fa9569..8625ce368 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,21 +1,27 @@ -version: "3" +version: "3.8" services: frontend: - env_file: - - ./frontend/.env build: context: frontend dockerfile: Dockerfile + args: + - NEXT_PUBLIC_ENV=local + - NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL} + - NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL} + - NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY} + - NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL} + - NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL} container_name: web restart: always ports: - 3000:3000 + backend-core: image: backend-base env_file: - - ./backend/.env + - .env build: context: backend dockerfile: Dockerfile @@ -38,7 +44,7 @@ services: worker: image: backend-base env_file: - - ./backend/.env + - .env build: context: backend dockerfile: Dockerfile @@ -51,7 +57,7 @@ services: beat: image: backend-base env_file: - - ./backend/.env + - .env build: context: backend dockerfile: Dockerfile @@ -64,7 +70,7 @@ services: flower: image: backend-base env_file: - - ./backend/.env + - .env build: context: backend dockerfile: Dockerfile @@ -77,3 +83,406 @@ services: - beat ports: - 5555:5555 + + studio: + container_name: supabase-studio + image: supabase/studio:20231123-64a766a + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})" + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + container_name: supabase-kong + image: kong:2.8.1 + restart: unless-stopped + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' + ports: + - ${KONG_HTTP_PORT}:8000/tcp + - ${KONG_HTTPS_PORT}:8443/tcp + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + volumes: + # https://github.com/supabase/supabase/issues/12661 + - 
./volumes/api/kong.yml:/home/kong/temp.yml:ro + + auth: + container_name: supabase-auth + image: supabase/gotrue:v2.99.0 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health" + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: ${API_EXTERNAL_URL} + + GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: ${SITE_URL} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOST} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + + rest: + container_name: supabase-rest + image: postgrest/postgrest:v11.2.2 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + restart: unless-stopped + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: "postgrest" + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + container_name: realtime-dev.supabase-realtime + image: supabase/realtime:v2.25.35 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "bash", + "-c", + "printf \\0 > /dev/tcp/localhost/4000" + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime' + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + FLY_ALLOC_ID: fly123 + FLY_APP_NAME: realtime + SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq + ERL_AFLAGS: -proto_dist inet_tcp + ENABLE_TAILSCALE: "false" + DNS_NODES: "''" + command: > 
+ sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server" + + storage: + container_name: supabase-storage + image: supabase/storage-api:v0.43.11 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:5000/status" + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + volumes: + - ./volumes/storage:/var/lib/storage:z + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + healthcheck: + test: [ "CMD", "imgproxy", "health" ] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + volumes: + - ./volumes/storage:/var/lib/storage:z + + meta: + container_name: supabase-meta + image: supabase/postgres-meta:v0.68.0 + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + restart: unless-stopped + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOST} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + + functions: + container_name: supabase-edge-functions + image: supabase/edge-runtime:v1.22.4 + restart: unless-stopped + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + volumes: + - ./volumes/functions:/home/deno/functions:Z + command: + - start + - --main-service + - /home/deno/functions/main + + analytics: + container_name: supabase-analytics + image: supabase/logflare:1.4.0 + healthcheck: + test: [ "CMD", "curl", "http://localhost:4000/health" ] + timeout: 5s + interval: 5s + retries: 10 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: ${POSTGRES_DB} + DB_HOSTNAME: ${POSTGRES_HOST} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + LOGFLARE_MIN_CLUSTER_SIZE: 1 + RELEASE_COOKIE: cookie + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB} + POSTGRES_BACKEND_SCHEMA: _analytics + LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + ports: + - 4000:4000 + entrypoint: | + sh -c `cat <<'EOF' > run.sh && sh run.sh + ./logflare eval Logflare.Release.migrate + ./logflare start --sname logflare + EOF + ` + + # Comment out everything below this point if you are using an external Postgres database + db: + container_name: supabase-db + image: supabase/postgres:15.1.0.117 + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + command: + - postgres + - -c + - config_file=/etc/postgresql/postgresql.conf + - -c + - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs + restart: unless-stopped + ports: + # Pass down internal port because it's set dynamically by other services + - ${POSTGRES_PORT}:${POSTGRES_PORT} + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + volumes: + - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ./volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for Analytics support + - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + - ./volumes/db/init/data.sql:/docker-entrypoint-initdb.d/seed.sql + + vector: + container_name: supabase-vector + image: 
timberio/vector:0.28.1-alpine
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://vector:9001/health"
+        ]
+      timeout: 5s
+      interval: 5s
+      retries: 3
+    volumes:
+      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
+      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
+
+    command: [ "--config", "etc/vector/vector.yml" ]
\ No newline at end of file
diff --git a/docs/docs/Developers/contribution/env.md b/docs/docs/Developers/contribution/env.md
index 3fa4c35c7..017ff0604 100644
--- a/docs/docs/Developers/contribution/env.md
+++ b/docs/docs/Developers/contribution/env.md
@@ -5,7 +5,7 @@

-You have two environment files in the root of the project:
-One in `backend/.env` and the other in `frontend/.env`.
+There is now a single `.env` file at the root of the project, used to configure the app server.
+It contains the following variables:

 ### Backend Environment File
diff --git a/docs/docs/Developers/contribution/llm/private-llm.md b/docs/docs/Developers/contribution/llm/private-llm.md
index 5a7dbd792..97c8ecc5a 100644
--- a/docs/docs/Developers/contribution/llm/private-llm.md
+++ b/docs/docs/Developers/contribution/llm/private-llm.md
@@ -25,7 +25,7 @@ Quivr introduces the groundbreaking feature of integrating private Large Languag
 ### 4. Install Quivr
 - To set up Quivr, kindly follow the concise 3-step installation guide provided in our [readme.md](https://github.com/Quivr/README.md).
-- Important: Configure environmental variables in your backend/.env file, including the Huggingface API key for seamless integration.
+- Important: Configure environment variables in your `.env` file, including the Huggingface API key for seamless integration.
 ### 5. Configure Your Supabase Instance
 - Within your Supabase instance, locate the user_settings table.
diff --git a/docs/docs/Developers/selfHosted/run_fully_local.md b/docs/docs/Developers/selfHosted/run_fully_local.md
index c12ac2ce2..6a010f9c0 100644
--- a/docs/docs/Developers/selfHosted/run_fully_local.md
+++ b/docs/docs/Developers/selfHosted/run_fully_local.md
@@ -19,51 +19,6 @@ The guide was put together in collaboration with members of the Quivr Discord, *

-## Local Supabase
-
-Instead of relying on a remote Supabase instance, we have to set it up locally. We are Following the instructions on https://supabase.com/docs/guides/self-hosting/docker with a few modifications
-
-### Go to the root of the Quivr repo
-
-Are you there ? Good.
-
-### Get the code
-`git clone --depth 1 https://github.com/supabase/supabase`
-
-### Go to the docker folder
-`cd supabase/docker`
-
-### Move the folder & files to the root
-
-`mv * ../../`
-
-### Copy the fake env vars
-`cp .env.example .env`
-
-### Edit the .env file
-
-`Update 3000 to 3001 for the port`
-
-### Run the docker-compose
-
-`docker-compose up -f docker-compose.supabase.yml up --build`
-
-Dont forget to change the variables in backend/.env to match your local supabase instance:
-- SUPABASE_URL
-- SUPABASE_SERVICE_KEY
-- JWT_SECRET_KEY
-
-And in the frontend/.env:
-- NEXT_PUBLIC_SUPABASE_URL
-- NEXT_PUBLIC_SUPABASE_ANON_KEY
-
-Troubleshooting:
-
-- If the Quivr backend container cannot reach Supabase on port 8000, change the Quivr backend container to use the host network.
-- If email service does not work, add a user using the supabase web ui, and check "Auto Confirm User?".
-  - http://localhost:8000/project/default/auth/users
-
-
 ## Ollama
@@ -162,7 +117,7 @@ DROP TABLE user_settings;

 In order to have Quivr use Ollama we need to update the env variables.
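+Before updating them, it is worth checking that Ollama is reachable. This is a suggested sanity check, not part of the original guide; it assumes Ollama is running on its default port:
+
+```bash
+# Lists the models Ollama has pulled locally; an empty list means you still need to run `ollama pull <model>`.
+curl http://localhost:11434/api/tags
+```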
-Go to `backend/.env` and add the following env variables: +Go to `.env` and add the following env variables: ```bash OLLAMA_API_BASE_URL=http://host.docker.internal:11434 diff --git a/frontend/Dockerfile b/frontend/Dockerfile index e0d3d733f..a79df5db4 100644 --- a/frontend/Dockerfile +++ b/frontend/Dockerfile @@ -23,6 +23,20 @@ WORKDIR /app COPY --from=deps /app/node_modules ./node_modules COPY . . +ARG NEXT_PUBLIC_ENV +ARG NEXT_PUBLIC_BACKEND_URL +ARG NEXT_PUBLIC_SUPABASE_URL +ARG NEXT_PUBLIC_SUPABASE_ANON_KEY +ARG NEXT_PUBLIC_CMS_URL +ARG NEXT_PUBLIC_FRONTEND_URL + +ENV NEXT_PUBLIC_ENV=$NEXT_PUBLIC_ENV +ENV NEXT_PUBLIC_BACKEND_URL=$NEXT_PUBLIC_BACKEND_URL +ENV NEXT_PUBLIC_SUPABASE_URL=$NEXT_PUBLIC_SUPABASE_URL +ENV NEXT_PUBLIC_SUPABASE_ANON_KEY=$NEXT_PUBLIC_SUPABASE_ANON_KEY +ENV NEXT_PUBLIC_CMS_URL=$NEXT_PUBLIC_CMS_URL +ENV NEXT_PUBLIC_FRONTEND_URL=$NEXT_PUBLIC_FRONTEND_URL + # Next.js collects completely anonymous telemetry data about general usage. # Learn more here: https://nextjs.org/telemetry # Uncomment the following line in case you want to disable telemetry during the build. diff --git a/install_helper.sh b/install_helper.sh deleted file mode 100755 index 5455b4035..000000000 --- a/install_helper.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh - -# Function to print error message and exit -error_exit() { - echo "$1" >&2 - exit 1 -} - -# Function to replace variables in files -replace_in_file() { - local file="$1" - local search="$2" - local replace="$3" - if [ "$(uname)" = "Darwin" ]; then - # macOS - sed -i "" "s|${search}|${replace}|" "$file" - else - # Linux/Unix and Windows (Git Bash) - sed -i "s|${search}|${replace}|" "$file" - fi -} - -# Step 2: Copy the .XXXXX_env files if they don't exist -if [ ! -f backend/.env ]; then - echo "Copying backend-core .env example file..." - cp .backend_env.example backend/.env -fi - -if [ ! -f frontend/.env ]; then - echo "Copying frontend .env example file..." - cp .frontend_env.example frontend/.env -fi - -# Step 3: Ask the user for environment variables and update .env files -# only if they haven't been set. - -# Update backend/.env -if grep -q "SUPABASE_URL=" backend/.env; then - echo "SUPABASE_URL can be found in your Supabase dashboard under Settings > API." - SUPABASE_URL=$(gum input --placeholder "Enter SUPABASE_URL for backend") - replace_in_file backend/.env "SUPABASE_URL=.*" "SUPABASE_URL=${SUPABASE_URL}" -fi - -if grep -q "SUPABASE_SERVICE_KEY=" backend/.env; then - echo "SUPABASE_SERVICE_KEY can be found in your Supabase dashboard under Settings > API. Use the anon public key found in the Project API keys section." - SUPABASE_SERVICE_KEY=$(gum input --placeholder "Enter SUPABASE_SERVICE_KEY for backend") - replace_in_file backend/.env "SUPABASE_SERVICE_KEY=.*" "SUPABASE_SERVICE_KEY=${SUPABASE_SERVICE_KEY}" -fi - -if grep -q "PG_DATABASE_URL=" backend/.env; then - echo "PG_DATABASE_URL can be found in your Postgres provider Settings > API." - PG_DATABASE_URL=$(gum input --placeholder "Enter PG_DATABASE_URL for backend") - replace_in_file backend/.env "PG_DATABASE_URL=.*" "PG_DATABASE_URL=${PG_DATABASE_URL}" -fi - -if grep -q "OPENAI_API_KEY=" backend/.env; then - echo "OPENAI_API_KEY is the API key from OpenAI, if you are using OpenAI services." 
- OPENAI_API_KEY=$(gum input --placeholder "Enter OPENAI_API_KEY for backend") - replace_in_file backend/.env "OPENAI_API_KEY=.*" "OPENAI_API_KEY=${OPENAI_API_KEY}" -fi - -if grep -q "JWT_SECRET_KEY=" backend/.env; then - echo "JWT_SECRET_KEY can be found in your Supabase project dashboard under Settings > API > JWT Settings > JWT Secret." - JWT_SECRET_KEY=$(gum input --placeholder "Enter JWT_SECRET_KEY for backend") - replace_in_file backend/.env "JWT_SECRET_KEY=.*" "JWT_SECRET_KEY=${JWT_SECRET_KEY}" -fi - -# Update frontend/.env using the same SUPABASE_URL and SUPABASE_SERVICE_KEY -if grep -q "NEXT_PUBLIC_SUPABASE_URL=" frontend/.env; then - replace_in_file frontend/.env "NEXT_PUBLIC_SUPABASE_URL=.*" "NEXT_PUBLIC_SUPABASE_URL=${SUPABASE_URL}" -fi - -if grep -q "NEXT_PUBLIC_SUPABASE_ANON_KEY=" frontend/.env; then - replace_in_file frontend/.env "NEXT_PUBLIC_SUPABASE_ANON_KEY=.*" "NEXT_PUBLIC_SUPABASE_ANON_KEY=${SUPABASE_SERVICE_KEY}" -fi - -# Step 4: Run the migration scripts (this is supposed to be done manually as per the instructions) -echo "Running the migration scripts..." -./migration.sh || error_exit "Error running migration scripts." - -# Step 5: Launch the app -echo "Launching the app..." -docker compose up --build || error_exit "Error running docker compose." - -# Final message -echo "Navigate to localhost:3000 in your browser to access the app." diff --git a/scripts/tables.sql b/scripts/tables.sql index 9994be271..88778b1a3 100644 --- a/scripts/tables.sql +++ b/scripts/tables.sql @@ -246,7 +246,7 @@ CREATE TABLE IF NOT EXISTS migrations ( CREATE TABLE IF NOT EXISTS user_settings ( user_id UUID PRIMARY KEY, - models JSONB DEFAULT '["gpt-3.5-turbo","gpt-4"]'::jsonb, + models JSONB DEFAULT '["gpt-3.5-turbo-1106","gpt-4"]'::jsonb, daily_chat_credit INT DEFAULT 300, max_brains INT DEFAULT 30, max_brain_size INT DEFAULT 100000000 diff --git a/volumes/api/kong.yml b/volumes/api/kong.yml new file mode 100644 index 000000000..4a0799503 --- /dev/null +++ b/volumes/api/kong.yml @@ -0,0 +1,223 @@ +_format_version: '2.1' +_transform: true + +### +### Consumers / Users +### +consumers: + - username: DASHBOARD + - username: anon + keyauth_credentials: + - key: $SUPABASE_ANON_KEY + - username: service_role + keyauth_credentials: + - key: $SUPABASE_SERVICE_KEY + +### +### Access Control List +### +acls: + - consumer: anon + group: anon + - consumer: service_role + group: admin + +### +### Dashboard credentials +### +basicauth_credentials: +- consumer: DASHBOARD + username: $DASHBOARD_USERNAME + password: $DASHBOARD_PASSWORD + + +### +### API Routes +### +services: + + ## Open Auth routes + - name: auth-v1-open + url: http://auth:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + url: http://auth:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + url: http://auth:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + + ## Secure Auth routes + - name: auth-v1 + _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*' + url: http://auth:9999/ + routes: + - name: auth-v1-all + strip_path: true + paths: + - /auth/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure REST routes + - name: 
rest-v1 + _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*' + url: http://rest:3000/ + routes: + - name: rest-v1-all + strip_path: true + paths: + - /rest/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure GraphQL routes + - name: graphql-v1 + _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql' + url: http://rest:3000/rpc/graphql + routes: + - name: graphql-v1-all + strip_path: true + paths: + - /graphql/v1 + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: request-transformer + config: + add: + headers: + - Content-Profile:graphql_public + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure Realtime routes + - name: realtime-v1 + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/socket/ + routes: + - name: realtime-v1-all + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Storage routes: the storage server manages its own auth + - name: storage-v1 + _comment: 'Storage: /storage/v1/* -> http://storage:5000/*' + url: http://storage:5000/ + routes: + - name: storage-v1-all + strip_path: true + paths: + - /storage/v1/ + plugins: + - name: cors + + ## Edge Functions routes + - name: functions-v1 + _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*' + url: http://functions:9000/ + routes: + - name: functions-v1-all + strip_path: true + paths: + - /functions/v1/ + plugins: + - name: cors + + ## Analytics routes + - name: analytics-v1 + _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*' + url: http://analytics:4000/ + routes: + - name: analytics-v1-all + strip_path: true + paths: + - /analytics/v1/ + + ## Secure Database routes + - name: meta + _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*' + url: http://meta:8080/ + routes: + - name: meta-all + strip_path: true + paths: + - /pg/ + plugins: + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + + ## Protected Dashboard - catch all remaining routes + - name: dashboard + _comment: 'Studio: /* -> http://studio:3000/*' + url: http://studio:3000/ + routes: + - name: dashboard-all + strip_path: true + paths: + - / + plugins: + - name: cors + - name: basic-auth + config: + hide_credentials: true diff --git a/volumes/db/init/data.sql b/volumes/db/init/data.sql new file mode 100755 index 000000000..e69de29bb diff --git a/volumes/db/jwt.sql b/volumes/db/jwt.sql new file mode 100644 index 000000000..cfd3b1602 --- /dev/null +++ b/volumes/db/jwt.sql @@ -0,0 +1,5 @@ +\set jwt_secret `echo "$JWT_SECRET"` +\set jwt_exp `echo "$JWT_EXP"` + +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; diff --git a/volumes/db/logs.sql b/volumes/db/logs.sql new file mode 100644 index 000000000..22fc2479e --- /dev/null +++ b/volumes/db/logs.sql @@ -0,0 +1,4 @@ +\set pguser `echo "$POSTGRES_USER"` + +create schema if not exists _analytics; +alter schema _analytics owner to :pguser; diff --git a/volumes/db/realtime.sql b/volumes/db/realtime.sql new file mode 100644 index 000000000..4d4b9ffb9 --- /dev/null +++ b/volumes/db/realtime.sql 
@@ -0,0 +1,4 @@ +\set pguser `echo "$POSTGRES_USER"` + +create schema if not exists _realtime; +alter schema _realtime owner to :pguser; diff --git a/volumes/db/roles.sql b/volumes/db/roles.sql new file mode 100644 index 000000000..8f7161a6d --- /dev/null +++ b/volumes/db/roles.sql @@ -0,0 +1,8 @@ +-- NOTE: change to your own passwords for production environments +\set pgpass `echo "$POSTGRES_PASSWORD"` + +ALTER USER authenticator WITH PASSWORD :'pgpass'; +ALTER USER pgbouncer WITH PASSWORD :'pgpass'; +ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; diff --git a/volumes/db/webhooks.sql b/volumes/db/webhooks.sql new file mode 100644 index 000000000..5837b8618 --- /dev/null +++ b/volumes/db/webhooks.sql @@ -0,0 +1,208 @@ +BEGIN; + -- Create pg_net extension + CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; + -- Create supabase_functions schema + CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; + GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; + -- supabase_functions.migrations definition + CREATE TABLE supabase_functions.migrations ( + version text PRIMARY KEY, + inserted_at timestamptz NOT NULL DEFAULT NOW() + ); + -- Initial supabase_functions migration + INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); + -- supabase_functions.hooks definition + CREATE TABLE supabase_functions.hooks ( + id bigserial PRIMARY KEY, + hook_table_id integer NOT NULL, + hook_name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + request_id bigint + ); + CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id); + CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); + COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.'; + CREATE FUNCTION supabase_functions.http_request() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + DECLARE + request_id bigint; + payload jsonb; + url text := TG_ARGV[0]::text; + method text := TG_ARGV[1]::text; + headers jsonb DEFAULT '{}'::jsonb; + params jsonb DEFAULT '{}'::jsonb; + timeout_ms integer DEFAULT 1000; + BEGIN + IF url IS NULL OR url = 'null' THEN + RAISE EXCEPTION 'url argument is missing'; + END IF; + + IF method IS NULL OR method = 'null' THEN + RAISE EXCEPTION 'method argument is missing'; + END IF; + + IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN + headers = '{"Content-Type": "application/json"}'::jsonb; + ELSE + headers = TG_ARGV[2]::jsonb; + END IF; + + IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN + params = '{}'::jsonb; + ELSE + params = TG_ARGV[3]::jsonb; + END IF; + + IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN + timeout_ms = 1000; + ELSE + timeout_ms = TG_ARGV[4]::integer; + END IF; + + CASE + WHEN method = 'GET' THEN + SELECT http_get INTO request_id FROM net.http_get( + url, + params, + headers, + timeout_ms + ); + WHEN method = 'POST' THEN + payload = jsonb_build_object( 
+ 'old_record', OLD, + 'record', NEW, + 'type', TG_OP, + 'table', TG_TABLE_NAME, + 'schema', TG_TABLE_SCHEMA + ); + + SELECT http_post INTO request_id FROM net.http_post( + url, + payload, + params, + headers, + timeout_ms + ); + ELSE + RAISE EXCEPTION 'method argument % is invalid', method; + END CASE; + + INSERT INTO supabase_functions.hooks + (hook_table_id, hook_name, request_id) + VALUES + (TG_RELID, TG_NAME, request_id); + + RETURN NEW; + END + $function$; + -- Supabase super admin + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + END + $$; + GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin; + ALTER USER supabase_functions_admin SET search_path = "supabase_functions"; + ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin; + ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin; + ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin; + GRANT supabase_functions_admin TO postgres; + -- Remove unused supabase_pg_net_admin role + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_pg_net_admin' + ) + THEN + REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin; + DROP OWNED BY supabase_pg_net_admin; + DROP ROLE supabase_pg_net_admin; + END IF; + END + $$; + -- pg_net grants when extension is already enabled + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_extension + WHERE extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END + $$; + -- Event trigger for pg_net + CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params 
jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END; + $$; + COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION') + EXECUTE PROCEDURE extensions.grant_pg_net_access(); + END IF; + END + $$; + INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants'); + ALTER function supabase_functions.http_request() SECURITY DEFINER; + ALTER function supabase_functions.http_request() SET search_path = supabase_functions; + REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC; + GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role; +COMMIT; diff --git a/volumes/functions/hello/index.ts b/volumes/functions/hello/index.ts new file mode 100644 index 000000000..f1e20b90e --- /dev/null +++ b/volumes/functions/hello/index.ts @@ -0,0 +1,16 @@ +// Follow this setup guide to integrate the Deno language server with your editor: +// https://deno.land/manual/getting_started/setup_your_environment +// This enables autocomplete, go to definition, etc. 
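+//
+// In this self-hosted stack, requests reach this function through the `main`
+// edge-runtime service (volumes/functions/main/index.ts below), which spawns a
+// worker per function directory under /home/deno/functions.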
+
+import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
+
+serve(async () => {
+  return new Response(
+    `"Hello from Edge Functions!"`,
+    { headers: { "Content-Type": "application/json" } },
+  )
+})
+
+// To invoke:
+// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \
+//   --header 'Authorization: Bearer <anon/service_role key>'
diff --git a/volumes/functions/main/index.ts b/volumes/functions/main/index.ts
new file mode 100644
index 000000000..a094010b9
--- /dev/null
+++ b/volumes/functions/main/index.ts
@@ -0,0 +1,94 @@
+import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
+import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
+
+console.log('main function started')
+
+const JWT_SECRET = Deno.env.get('JWT_SECRET')
+const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
+
+function getAuthToken(req: Request) {
+  const authHeader = req.headers.get('authorization')
+  if (!authHeader) {
+    throw new Error('Missing authorization header')
+  }
+  const [bearer, token] = authHeader.split(' ')
+  if (bearer !== 'Bearer') {
+    throw new Error(`Auth header is not 'Bearer {token}'`)
+  }
+  return token
+}
+
+async function verifyJWT(jwt: string): Promise<boolean> {
+  const encoder = new TextEncoder()
+  const secretKey = encoder.encode(JWT_SECRET)
+  try {
+    await jose.jwtVerify(jwt, secretKey)
+  } catch (err) {
+    console.error(err)
+    return false
+  }
+  return true
+}
+
+serve(async (req: Request) => {
+  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
+    try {
+      const token = getAuthToken(req)
+      const isValidJWT = await verifyJWT(token)
+
+      if (!isValidJWT) {
+        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
+          status: 401,
+          headers: { 'Content-Type': 'application/json' },
+        })
+      }
+    } catch (e) {
+      console.error(e)
+      return new Response(JSON.stringify({ msg: e.toString() }), {
+        status: 401,
+        headers: { 'Content-Type': 'application/json' },
+      })
+    }
+  }
+
+  const url = new URL(req.url)
+  const { pathname } = url
+  const path_parts = pathname.split('/')
+  const service_name = path_parts[1]
+
+  if (!service_name || service_name === '') {
+    const error = { msg: 'missing function name in request' }
+    return new Response(JSON.stringify(error), {
+      status: 400,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+
+  const servicePath = `/home/deno/functions/${service_name}`
+  console.error(`serving the request with ${servicePath}`)
+
+  const memoryLimitMb = 150
+  const workerTimeoutMs = 1 * 60 * 1000
+  const noModuleCache = false
+  const importMapPath = null
+  const envVarsObj = Deno.env.toObject()
+  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
+
+  try {
+    const worker = await EdgeRuntime.userWorkers.create({
+      servicePath,
+      memoryLimitMb,
+      workerTimeoutMs,
+      noModuleCache,
+      importMapPath,
+      envVars,
+    })
+    return await worker.fetch(req)
+  } catch (e) {
+    const error = { msg: e.toString() }
+    return new Response(JSON.stringify(error), {
+      status: 500,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+})
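+
+// Example invocation through the Kong gateway (a sketch: the port assumes the
+// local defaults from .env.example, and <ANON_KEY> stands in for your anon key):
+//
+//   curl 'http://localhost:8000/functions/v1/hello' \
+//     --header 'Authorization: Bearer <ANON_KEY>'
+//
+// 'hello' becomes path_parts[1] above, so the worker is served from
+// /home/deno/functions/hello.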
diff --git a/volumes/logs/vector.yml b/volumes/logs/vector.yml
new file mode 100644
index 000000000..39a007729
--- /dev/null
+++ b/volumes/logs/vector.yml
@@ -0,0 +1,232 @@
+api:
+  enabled: true
+  address: 0.0.0.0:9001
+
+sources:
+  docker_host:
+    type: docker_logs
+    exclude_containers:
+      - supabase-vector
+
+transforms:
+  project_logs:
+    type: remap
+    inputs:
+      - docker_host
+    source: |-
+      .project = "default"
+      .event_message = del(.message)
+      .appname = del(.container_name)
+      del(.container_created_at)
+      del(.container_id)
+      del(.source_type)
+      del(.stream)
+      del(.label)
+      del(.image)
+      del(.host)
+  router:
+    type: route
+    inputs:
+      - project_logs
+    route:
+      kong: '.appname == "supabase-kong"'
+      auth: '.appname == "supabase-auth"'
+      rest: '.appname == "supabase-rest"'
+      realtime: '.appname == "supabase-realtime"'
+      storage: '.appname == "supabase-storage"'
+      functions: '.appname == "supabase-functions"'
+      db: '.appname == "supabase-db"'
+  # Ignores non-nginx errors, since they relate to Kong booting up
+  kong_logs:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      req, err = parse_nginx_log(.event_message, "combined")
+      if err == null {
+        .timestamp = req.timestamp
+        .metadata.request.headers.referer = req.referer
+        .metadata.request.headers.user_agent = req.agent
+        .metadata.request.headers.cf_connecting_ip = req.client
+        .metadata.request.method = req.method
+        .metadata.request.path = req.path
+        .metadata.request.protocol = req.protocol
+        .metadata.response.status_code = req.status
+      }
+      if err != null {
+        abort
+      }
+  # Ignores non-nginx errors, since they relate to Kong booting up
+  kong_err:
+    type: remap
+    inputs:
+      - router.kong
+    source: |-
+      .metadata.request.method = "GET"
+      .metadata.response.status_code = 200
+      parsed, err = parse_nginx_log(.event_message, "error")
+      if err == null {
+        .timestamp = parsed.timestamp
+        .severity = parsed.severity
+        .metadata.request.host = parsed.host
+        .metadata.request.headers.cf_connecting_ip = parsed.client
+        url, err = split(parsed.request, " ")
+        if err == null {
+          .metadata.request.method = url[0]
+          .metadata.request.path = url[1]
+          .metadata.request.protocol = url[2]
+        }
+      }
+      if err != null {
+        abort
+      }
+  # GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency
+  auth_logs:
+    type: remap
+    inputs:
+      - router.auth
+    source: |-
+      parsed, err = parse_json(.event_message)
+      if err == null {
+        .metadata.timestamp = parsed.time
+        .metadata = merge!(.metadata, parsed)
+      }
+  # PostgREST logs are structured, so we separate the timestamp from the message using a regex
+  rest_logs:
+    type: remap
+    inputs:
+      - router.rest
+    source: |-
+      parsed, err = parse_regex(.event_message, r'^(?P