Mirror of https://github.com/StanGirard/quivr.git (synced 2024-12-23 11:23:00 +03:00)

feat: 🎸 supabase

Added usage of the Supabase CLI.

This commit is contained in:
parent 5a1ff46b68
commit 10d0ec0448
.gitignore (vendored, 1 addition)

@@ -72,3 +72,4 @@ backend/share/*
backend/slim.report.json
volumes/db/data/
volumes/storage/stub/stub/quivr/*
supabase/migrations/20240103191539_private.sql
.vscode/extensions.json (vendored, 3 changes)

@@ -4,6 +4,7 @@
    "dbaeumer.vscode-eslint",
    "ms-python.vscode-pylance",
    "ms-pyright.pyright",
    "inlang.vs-code-extension"
    "inlang.vs-code-extension",
    "denoland.vscode-deno"
  ]
}
@@ -32,6 +32,17 @@ class UserUsage(Repository):
        """
        matching_customers = None
        try:
            is_premium_user = (
                self.db.from_("user_settings")
                .select("is_premium")
                .filter("user_id", "eq", str(user_id))
                .execute()
                .data
            )

            if len(is_premium_user) > 0 and is_premium_user[0]["is_premium"]:
                return True

            user_email_customer = (
                self.db.from_("users")
                .select("*")
@@ -42,20 +53,41 @@ class UserUsage(Repository):
            if len(user_email_customer) == 0:
                return False

            matching_customers = (
                self.db.table("customers")
                .select("email")
                .filter("email", "eq", user_email_customer[0]["email"])
            subscription_still_valid = (
                self.db.from_("subscriptions")
                .select("*")
                .filter("customer", "eq", user_email_customer[0]["id"])
                .filter("current_period_end", "gte", datetime.now())
                .execute()
            ).data

            if len(subscription_still_valid) > 0:
                matching_customers = (
                    self.db.table("customers")
                    .select("email")
                    .filter("email", "eq", user_email_customer[0]["email"])
                    .execute()
                ).data

                if len(matching_customers) > 0:
                    self.db.table("user_settings").update({"is_premium": True}).match(
                        {"user_id": str(user_id)}
                    ).execute()
                    return True
            else:
                self.db.table("user_settings").update({"is_premium": False}).match(
                    {"user_id": str(user_id)}
                ).execute()
                return False

        except Exception as e:
            logger.info(matching_customers)
            logger.error("Error while checking if user is a premium user")
            logger.error(
                "Error while checking if user is a premium user. Stripe needs to be configured."
            )
            logger.error(e)
            return False

        return len(matching_customers) > 0

    def get_user_settings(self, user_id):
        """
        Fetch the user settings from the database
@@ -80,11 +112,10 @@ class UserUsage(Repository):
            raise ValueError("User settings could not be created")

        user_settings = user_settings_response[0]
        user_settings["is_premium"] = False
        is_premium_user = self.check_if_is_premium_user(user_id)

        if is_premium_user:
            user_settings["is_premium"] = True
        check_is_premium = self.check_if_is_premium_user(user_id)

        if check_is_premium:
            user_settings["max_brains"] = int(
                os.environ.get("PREMIUM_MAX_BRAIN_NUMBER", 12)
            )
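A minimal sketch of how the updated premium check is typically exercised by calling code; the import path and constructor arguments are assumptions, since this diff only shows the repository methods.

# Hypothetical caller (constructor arguments and values assumed)
from uuid import UUID

user_id = UUID("00000000-0000-0000-0000-000000000000")
usage = UserUsage(id=user_id, email="user@example.com")  # repository shown in the diff above
settings = usage.get_user_settings(user_id)
print(settings["max_brains"], settings.get("is_premium", False))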
@@ -35,7 +35,7 @@ def check_user_requests_limit(
    if int(userDailyUsage.daily_requests_count) >= int(daily_chat_credit):
        raise HTTPException(
            status_code=429,  # pyright: ignore reportPrivateUsage=none
            detail="You have reached the maximum number of requests for today.",  # pyright: ignore reportPrivateUsage=none
            detail=f"You have reached your daily chat limit of {daily_chat_credit} requests per day. Please upgrade your plan to increase your daily chat limit.",
        )
    else:
        pass
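A brief sketch of how a caller would see the new 429 detail message, assuming an httpx client against the backend port used in the compose files; the endpoint path is an assumption, not part of this commit.

# Hypothetical client-side handling of the daily-limit error (endpoint path assumed)
import httpx

resp = httpx.post("http://localhost:5050/chat", json={"question": "hello"})
if resp.status_code == 429:
    # FastAPI serializes HTTPException as {"detail": ...}; the new message
    # includes the user's daily_chat_credit value.
    print(resp.json()["detail"])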
@@ -1,83 +0,0 @@
version: "3.8"

services:
  backend-core:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile.dev
    container_name: backend-core
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:5050/healthz" ]
    command:
      - "uvicorn"
      - "main:app"
      - "--reload"
      - "--host"
      - "0.0.0.0"
      - "--port"
      - "5050"
      - "--workers"
      - "1"
    restart: always
    volumes:
      - ./backend/:/code/
    ports:
      - 5050:5050

  redis:
    image: redis:latest
    container_name: redis
    restart: always
    ports:
      - 6379:6379


  worker:
    pull_policy: never
    image: backend-base
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile.dev
    container_name: worker
    command: celery -A celery_worker worker -l info
    restart: always
    depends_on:
      - redis

  beat:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile.dev
    container_name: beat
    command: celery -A celery_worker beat -l info
    restart: always
    depends_on:
      - redis

  flower:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile.dev
    container_name: flower
    command: celery -A celery_worker flower -l info --port=5555
    restart: always
    depends_on:
      - redis
      - worker
      - beat
    ports:
      - 5555:5555
@ -1,106 +0,0 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
frontend:
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: frontend
|
||||
dockerfile: Dockerfile.dev
|
||||
args:
|
||||
- NEXT_PUBLIC_ENV=local
|
||||
- NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL}
|
||||
- NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL}
|
||||
- NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY}
|
||||
- NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL}
|
||||
- NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL}
|
||||
container_name: web
|
||||
depends_on:
|
||||
- backend-core
|
||||
restart: always
|
||||
volumes:
|
||||
- ./frontend/:/app/
|
||||
ports:
|
||||
- 3000:3000
|
||||
|
||||
|
||||
backend-core:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile.dev
|
||||
container_name: backend-core
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "http://localhost:5050/healthz" ]
|
||||
command:
|
||||
- "uvicorn"
|
||||
- "main:app"
|
||||
- "--reload"
|
||||
- "--host"
|
||||
- "0.0.0.0"
|
||||
- "--port"
|
||||
- "5050"
|
||||
- "--workers"
|
||||
- "1"
|
||||
restart: always
|
||||
volumes:
|
||||
- ./backend/:/code/
|
||||
ports:
|
||||
- 5050:5050
|
||||
|
||||
redis:
|
||||
image: redis:latest
|
||||
container_name: redis
|
||||
restart: always
|
||||
ports:
|
||||
- 6379:6379
|
||||
|
||||
worker:
|
||||
pull_policy: never
|
||||
image: backend-base
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile.dev
|
||||
container_name: worker
|
||||
command: celery -A celery_worker worker -l info
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
|
||||
beat:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile.dev
|
||||
container_name: beat
|
||||
command: celery -A celery_worker beat -l info
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
|
||||
flower:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile.dev
|
||||
container_name: flower
|
||||
command: celery -A celery_worker flower -l info --port=5555
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
- worker
|
||||
- beat
|
||||
ports:
|
||||
- 5555:5555
|
@ -1,495 +0,0 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
backend-core:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile.dev
|
||||
args:
|
||||
- DEV_MODE=true
|
||||
container_name: backend-core
|
||||
volumes:
|
||||
- ./backend/:/code/
|
||||
command:
|
||||
- "uvicorn"
|
||||
- "main:app"
|
||||
- "--reload"
|
||||
- "--host"
|
||||
- "0.0.0.0"
|
||||
- "--port"
|
||||
- "5050"
|
||||
- "--workers"
|
||||
- "2"
|
||||
restart: always
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
kong:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- 5050:5050
|
||||
- 5678:5678 # debug port
|
||||
|
||||
redis:
|
||||
image: redis:latest
|
||||
container_name: redis
|
||||
restart: always
|
||||
ports:
|
||||
- 6379:6379
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
|
||||
worker:
|
||||
pull_policy: never
|
||||
image: backend-base
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile
|
||||
container_name: worker
|
||||
command: celery -A celery_worker worker -l info
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
- db
|
||||
|
||||
beat:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile
|
||||
container_name: beat
|
||||
command: celery -A celery_worker beat -l info
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
|
||||
flower:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: backend
|
||||
dockerfile: Dockerfile
|
||||
container_name: flower
|
||||
command: celery -A celery_worker flower -l info --port=5555
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
- worker
|
||||
- beat
|
||||
ports:
|
||||
- 5555:5555
|
||||
|
||||
studio:
|
||||
container_name: supabase-studio
|
||||
image: supabase/studio:20231123-64a766a
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"node",
|
||||
"-e",
|
||||
"require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
STUDIO_PG_META_URL: http://meta:8080
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
|
||||
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
|
||||
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_URL: http://analytics:4000
|
||||
NEXT_PUBLIC_ENABLE_LOGS: true
|
||||
# Comment to use Big Query backend for analytics
|
||||
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
|
||||
|
||||
kong:
|
||||
container_name: supabase-kong
|
||||
image: kong:2.8.1
|
||||
restart: unless-stopped
|
||||
# https://unix.stackexchange.com/a/294837
|
||||
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
|
||||
ports:
|
||||
- ${KONG_HTTP_PORT}:8000/tcp
|
||||
- ${KONG_HTTPS_PORT}:8443/tcp
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
KONG_DATABASE: "off"
|
||||
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
|
||||
# https://github.com/supabase/cli/issues/14
|
||||
KONG_DNS_ORDER: LAST,A,CNAME
|
||||
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
|
||||
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
|
||||
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
|
||||
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
|
||||
volumes:
|
||||
# https://github.com/supabase/supabase/issues/12661
|
||||
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro
|
||||
|
||||
auth:
|
||||
container_name: supabase-auth
|
||||
image: supabase/gotrue:v2.99.0
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:9999/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
GOTRUE_API_HOST: 0.0.0.0
|
||||
GOTRUE_API_PORT: 9999
|
||||
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
|
||||
|
||||
GOTRUE_DB_DRIVER: postgres
|
||||
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
|
||||
GOTRUE_SITE_URL: ${SITE_URL}
|
||||
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
|
||||
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
|
||||
|
||||
GOTRUE_JWT_ADMIN_ROLES: service_role
|
||||
GOTRUE_JWT_AUD: authenticated
|
||||
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
|
||||
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
|
||||
GOTRUE_JWT_SECRET: ${JWT_SECRET}
|
||||
|
||||
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
|
||||
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
|
||||
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
|
||||
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
|
||||
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
|
||||
GOTRUE_SMTP_HOST: ${SMTP_HOST}
|
||||
GOTRUE_SMTP_PORT: ${SMTP_PORT}
|
||||
GOTRUE_SMTP_USER: ${SMTP_USER}
|
||||
GOTRUE_SMTP_PASS: ${SMTP_PASS}
|
||||
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
|
||||
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
|
||||
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
|
||||
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
|
||||
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
|
||||
|
||||
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
|
||||
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
|
||||
|
||||
rest:
|
||||
container_name: supabase-rest
|
||||
image: postgrest/postgrest:v11.2.2
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
|
||||
PGRST_DB_ANON_ROLE: anon
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_DB_USE_LEGACY_GUCS: "false"
|
||||
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
|
||||
command: "postgrest"
|
||||
|
||||
realtime:
|
||||
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
||||
container_name: realtime-dev.supabase-realtime
|
||||
image: supabase/realtime:v2.25.35
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"bash",
|
||||
"-c",
|
||||
"printf \\0 > /dev/tcp/localhost/4000"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PORT: 4000
|
||||
DB_HOST: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_USER: supabase_admin
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_NAME: ${POSTGRES_DB}
|
||||
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
|
||||
DB_ENC_KEY: supabaserealtime
|
||||
API_JWT_SECRET: ${JWT_SECRET}
|
||||
FLY_ALLOC_ID: fly123
|
||||
FLY_APP_NAME: realtime
|
||||
SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
ERL_AFLAGS: -proto_dist inet_tcp
|
||||
ENABLE_TAILSCALE: "false"
|
||||
DNS_NODES: "''"
|
||||
command: >
|
||||
sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"
|
||||
|
||||
storage:
|
||||
container_name: supabase-storage
|
||||
image: supabase/storage-api:v0.43.11
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
rest:
|
||||
condition: service_started
|
||||
imgproxy:
|
||||
condition: service_started
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:5000/status"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
ANON_KEY: ${ANON_KEY}
|
||||
SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
POSTGREST_URL: http://rest:3000
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
FILE_SIZE_LIMIT: 52428800
|
||||
STORAGE_BACKEND: file
|
||||
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
|
||||
TENANT_ID: stub
|
||||
# TODO: https://github.com/supabase/storage-api/issues/55
|
||||
REGION: stub
|
||||
GLOBAL_S3_BUCKET: stub
|
||||
ENABLE_IMAGE_TRANSFORMATION: "true"
|
||||
IMGPROXY_URL: http://imgproxy:5001
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
|
||||
imgproxy:
|
||||
container_name: supabase-imgproxy
|
||||
image: darthsim/imgproxy:v3.8.0
|
||||
healthcheck:
|
||||
test: [ "CMD", "imgproxy", "health" ]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
IMGPROXY_BIND: ":5001"
|
||||
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
|
||||
IMGPROXY_USE_ETAG: "true"
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
|
||||
meta:
|
||||
container_name: supabase-meta
|
||||
image: supabase/postgres-meta:v0.68.0
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PG_META_PORT: 8080
|
||||
PG_META_DB_HOST: ${POSTGRES_HOST}
|
||||
PG_META_DB_PORT: ${POSTGRES_PORT}
|
||||
PG_META_DB_NAME: ${POSTGRES_DB}
|
||||
PG_META_DB_USER: supabase_admin
|
||||
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
functions:
|
||||
container_name: supabase-edge-functions
|
||||
image: supabase/edge-runtime:v1.22.4
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
|
||||
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
|
||||
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
|
||||
volumes:
|
||||
- ./volumes/functions:/home/deno/functions:Z
|
||||
command:
|
||||
- start
|
||||
- --main-service
|
||||
- /home/deno/functions/main
|
||||
|
||||
analytics:
|
||||
container_name: supabase-analytics
|
||||
image: supabase/logflare:1.4.0
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "http://localhost:4000/health" ]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 10
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# volumes:
|
||||
# - type: bind
|
||||
# source: ${PWD}/gcloud.json
|
||||
# target: /opt/app/rel/logflare/bin/gcloud.json
|
||||
# read_only: true
|
||||
environment:
|
||||
LOGFLARE_NODE_HOST: 127.0.0.1
|
||||
DB_USERNAME: supabase_admin
|
||||
DB_DATABASE: ${POSTGRES_DB}
|
||||
DB_HOSTNAME: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_SCHEMA: _analytics
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_SINGLE_TENANT: true
|
||||
LOGFLARE_SUPABASE_MODE: true
|
||||
LOGFLARE_MIN_CLUSTER_SIZE: 1
|
||||
RELEASE_COOKIE: cookie
|
||||
|
||||
# Comment variables to use Big Query backend for analytics
|
||||
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
POSTGRES_BACKEND_SCHEMA: _analytics
|
||||
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
|
||||
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
|
||||
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
|
||||
ports:
|
||||
- 4000:4000
|
||||
entrypoint: |
|
||||
sh -c `cat <<'EOF' > run.sh && sh run.sh
|
||||
./logflare eval Logflare.Release.migrate
|
||||
./logflare start --sname logflare
|
||||
EOF
|
||||
`
|
||||
|
||||
# Comment out everything below this point if you are using an external Postgres database
|
||||
db:
|
||||
container_name: supabase-db
|
||||
image: supabase/postgres:15.1.0.117
|
||||
healthcheck:
|
||||
test: pg_isready -U postgres -h localhost
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
depends_on:
|
||||
vector:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- postgres
|
||||
- -c
|
||||
- config_file=/etc/postgresql/postgresql.conf
|
||||
- -c
|
||||
- log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
# Pass down internal port because it's set dynamically by other services
|
||||
- ${POSTGRES_PORT}:${POSTGRES_PORT}
|
||||
environment:
|
||||
POSTGRES_HOST: /var/run/postgresql
|
||||
PGPORT: ${POSTGRES_PORT}
|
||||
POSTGRES_PORT: ${POSTGRES_PORT}
|
||||
PGPASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
PGDATABASE: ${POSTGRES_DB}
|
||||
POSTGRES_DB: ${POSTGRES_DB}
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
JWT_EXP: ${JWT_EXPIRY}
|
||||
volumes:
|
||||
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
||||
# Must be superuser to create event trigger
|
||||
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
||||
# Must be superuser to alter reserved role
|
||||
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
||||
# Initialize the database settings with JWT_SECRET and JWT_EXP
|
||||
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
|
||||
# PGDATA directory is persisted between restarts
|
||||
- ./volumes/db/data:/var/lib/postgresql/data:Z
|
||||
# Changes required for Analytics support
|
||||
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
|
||||
- ./scripts/tables.sql:/docker-entrypoint-initdb.d/seed.sql
|
||||
|
||||
vector:
|
||||
container_name: supabase-vector
|
||||
image: timberio/vector:0.28.1-alpine
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://vector:9001/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
volumes:
|
||||
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
|
||||
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
|
||||
|
||||
command: [ "--config", "etc/vector/vector.yml" ]
|
@ -1,28 +1,6 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
frontend:
|
||||
pull_policy: never
|
||||
env_file:
|
||||
- .env
|
||||
build:
|
||||
context: frontend
|
||||
dockerfile: Dockerfile.dev
|
||||
args:
|
||||
- NEXT_PUBLIC_ENV=local
|
||||
- NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL}
|
||||
- NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL}
|
||||
- NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY}
|
||||
- NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL}
|
||||
- NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL}
|
||||
container_name: web
|
||||
depends_on:
|
||||
- backend-core
|
||||
restart: always
|
||||
ports:
|
||||
- 3000:3000
|
||||
|
||||
|
||||
backend-core:
|
||||
image: backend-base
|
||||
pull_policy: never
|
||||
@ -36,7 +14,7 @@ services:
|
||||
container_name: backend-core
|
||||
volumes:
|
||||
- ./backend/:/code/
|
||||
command:
|
||||
command:
|
||||
- "uvicorn"
|
||||
- "main:app"
|
||||
- "--reload"
|
||||
@ -46,12 +24,8 @@ services:
|
||||
- "5050"
|
||||
- "--workers"
|
||||
- "2"
|
||||
restart: always
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
kong:
|
||||
condition: service_healthy
|
||||
restart: always
|
||||
|
||||
ports:
|
||||
- 5050:5050
|
||||
- 5678:5678 # debug port
|
||||
@ -62,9 +36,6 @@ services:
|
||||
restart: always
|
||||
ports:
|
||||
- 6379:6379
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
|
||||
worker:
|
||||
pull_policy: never
|
||||
@ -79,7 +50,6 @@ services:
|
||||
restart: always
|
||||
depends_on:
|
||||
- redis
|
||||
- db
|
||||
|
||||
beat:
|
||||
image: backend-base
|
||||
@ -112,406 +82,3 @@ services:
|
||||
- beat
|
||||
ports:
|
||||
- 5555:5555
|
||||
|
||||
studio:
|
||||
container_name: supabase-studio
|
||||
image: supabase/studio:20231123-64a766a
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"node",
|
||||
"-e",
|
||||
"require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
STUDIO_PG_META_URL: http://meta:8080
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
|
||||
DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
|
||||
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_URL: http://analytics:4000
|
||||
NEXT_PUBLIC_ENABLE_LOGS: true
|
||||
# Comment to use Big Query backend for analytics
|
||||
NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
|
||||
|
||||
kong:
|
||||
container_name: supabase-kong
|
||||
image: kong:2.8.1
|
||||
restart: unless-stopped
|
||||
# https://unix.stackexchange.com/a/294837
|
||||
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
|
||||
ports:
|
||||
- ${KONG_HTTP_PORT}:8000/tcp
|
||||
- ${KONG_HTTPS_PORT}:8443/tcp
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
KONG_DATABASE: "off"
|
||||
KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
|
||||
# https://github.com/supabase/cli/issues/14
|
||||
KONG_DNS_ORDER: LAST,A,CNAME
|
||||
KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
|
||||
KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
|
||||
KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
|
||||
DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
|
||||
volumes:
|
||||
# https://github.com/supabase/supabase/issues/12661
|
||||
- ./volumes/api/kong.yml:/home/kong/temp.yml:ro
|
||||
|
||||
auth:
|
||||
container_name: supabase-auth
|
||||
image: supabase/gotrue:v2.99.0
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:9999/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
GOTRUE_API_HOST: 0.0.0.0
|
||||
GOTRUE_API_PORT: 9999
|
||||
API_EXTERNAL_URL: ${API_EXTERNAL_URL}
|
||||
|
||||
GOTRUE_DB_DRIVER: postgres
|
||||
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
|
||||
GOTRUE_SITE_URL: ${SITE_URL}
|
||||
GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
|
||||
GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
|
||||
|
||||
GOTRUE_JWT_ADMIN_ROLES: service_role
|
||||
GOTRUE_JWT_AUD: authenticated
|
||||
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
|
||||
GOTRUE_JWT_EXP: ${JWT_EXPIRY}
|
||||
GOTRUE_JWT_SECRET: ${JWT_SECRET}
|
||||
|
||||
GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
|
||||
GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
|
||||
# GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
|
||||
# GOTRUE_SMTP_MAX_FREQUENCY: 1s
|
||||
GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
|
||||
GOTRUE_SMTP_HOST: ${SMTP_HOST}
|
||||
GOTRUE_SMTP_PORT: ${SMTP_PORT}
|
||||
GOTRUE_SMTP_USER: ${SMTP_USER}
|
||||
GOTRUE_SMTP_PASS: ${SMTP_PASS}
|
||||
GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
|
||||
GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
|
||||
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
|
||||
GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
|
||||
GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
|
||||
|
||||
GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
|
||||
GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
|
||||
|
||||
rest:
|
||||
container_name: supabase-rest
|
||||
image: postgrest/postgrest:v11.2.2
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
|
||||
PGRST_DB_ANON_ROLE: anon
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_DB_USE_LEGACY_GUCS: "false"
|
||||
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
|
||||
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
|
||||
command: "postgrest"
|
||||
|
||||
realtime:
|
||||
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
|
||||
container_name: realtime-dev.supabase-realtime
|
||||
image: supabase/realtime:v2.25.35
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"bash",
|
||||
"-c",
|
||||
"printf \\0 > /dev/tcp/localhost/4000"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PORT: 4000
|
||||
DB_HOST: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_USER: supabase_admin
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_NAME: ${POSTGRES_DB}
|
||||
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
|
||||
DB_ENC_KEY: supabaserealtime
|
||||
API_JWT_SECRET: ${JWT_SECRET}
|
||||
FLY_ALLOC_ID: fly123
|
||||
FLY_APP_NAME: realtime
|
||||
SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
|
||||
ERL_AFLAGS: -proto_dist inet_tcp
|
||||
ENABLE_TAILSCALE: "false"
|
||||
DNS_NODES: "''"
|
||||
command: >
|
||||
sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"
|
||||
|
||||
storage:
|
||||
container_name: supabase-storage
|
||||
image: supabase/storage-api:v0.43.11
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
rest:
|
||||
condition: service_started
|
||||
imgproxy:
|
||||
condition: service_started
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:5000/status"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
ANON_KEY: ${ANON_KEY}
|
||||
SERVICE_KEY: ${SERVICE_ROLE_KEY}
|
||||
POSTGREST_URL: http://rest:3000
|
||||
PGRST_JWT_SECRET: ${JWT_SECRET}
|
||||
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
FILE_SIZE_LIMIT: 52428800
|
||||
STORAGE_BACKEND: file
|
||||
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
|
||||
TENANT_ID: stub
|
||||
# TODO: https://github.com/supabase/storage-api/issues/55
|
||||
REGION: stub
|
||||
GLOBAL_S3_BUCKET: stub
|
||||
ENABLE_IMAGE_TRANSFORMATION: "true"
|
||||
IMGPROXY_URL: http://imgproxy:5001
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
|
||||
imgproxy:
|
||||
container_name: supabase-imgproxy
|
||||
image: darthsim/imgproxy:v3.8.0
|
||||
healthcheck:
|
||||
test: [ "CMD", "imgproxy", "health" ]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
environment:
|
||||
IMGPROXY_BIND: ":5001"
|
||||
IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
|
||||
IMGPROXY_USE_ETAG: "true"
|
||||
IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
|
||||
volumes:
|
||||
- ./volumes/storage:/var/lib/storage:z
|
||||
|
||||
meta:
|
||||
container_name: supabase-meta
|
||||
image: supabase/postgres-meta:v0.68.0
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
PG_META_PORT: 8080
|
||||
PG_META_DB_HOST: ${POSTGRES_HOST}
|
||||
PG_META_DB_PORT: ${POSTGRES_PORT}
|
||||
PG_META_DB_NAME: ${POSTGRES_DB}
|
||||
PG_META_DB_USER: supabase_admin
|
||||
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
|
||||
functions:
|
||||
container_name: supabase-edge-functions
|
||||
image: supabase/edge-runtime:v1.22.4
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
analytics:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
SUPABASE_URL: http://kong:8000
|
||||
SUPABASE_ANON_KEY: ${ANON_KEY}
|
||||
SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
|
||||
SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
|
||||
VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
|
||||
volumes:
|
||||
- ./volumes/functions:/home/deno/functions:Z
|
||||
command:
|
||||
- start
|
||||
- --main-service
|
||||
- /home/deno/functions/main
|
||||
|
||||
analytics:
|
||||
container_name: supabase-analytics
|
||||
image: supabase/logflare:1.4.0
|
||||
healthcheck:
|
||||
test: [ "CMD", "curl", "http://localhost:4000/health" ]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 10
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
# Disable this if you are using an external Postgres database
|
||||
condition: service_healthy
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# volumes:
|
||||
# - type: bind
|
||||
# source: ${PWD}/gcloud.json
|
||||
# target: /opt/app/rel/logflare/bin/gcloud.json
|
||||
# read_only: true
|
||||
environment:
|
||||
LOGFLARE_NODE_HOST: 127.0.0.1
|
||||
DB_USERNAME: supabase_admin
|
||||
DB_DATABASE: ${POSTGRES_DB}
|
||||
DB_HOSTNAME: ${POSTGRES_HOST}
|
||||
DB_PORT: ${POSTGRES_PORT}
|
||||
DB_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
DB_SCHEMA: _analytics
|
||||
LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
|
||||
LOGFLARE_SINGLE_TENANT: true
|
||||
LOGFLARE_SUPABASE_MODE: true
|
||||
LOGFLARE_MIN_CLUSTER_SIZE: 1
|
||||
RELEASE_COOKIE: cookie
|
||||
|
||||
# Comment variables to use Big Query backend for analytics
|
||||
POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
POSTGRES_BACKEND_SCHEMA: _analytics
|
||||
LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
|
||||
|
||||
# Uncomment to use Big Query backend for analytics
|
||||
# GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
|
||||
# GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
|
||||
ports:
|
||||
- 4000:4000
|
||||
entrypoint: |
|
||||
sh -c `cat <<'EOF' > run.sh && sh run.sh
|
||||
./logflare eval Logflare.Release.migrate
|
||||
./logflare start --sname logflare
|
||||
EOF
|
||||
`
|
||||
|
||||
# Comment out everything below this point if you are using an external Postgres database
|
||||
db:
|
||||
container_name: supabase-db
|
||||
image: supabase/postgres:15.1.0.117
|
||||
healthcheck:
|
||||
test: pg_isready -U postgres -h localhost
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
depends_on:
|
||||
vector:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- postgres
|
||||
- -c
|
||||
- config_file=/etc/postgresql/postgresql.conf
|
||||
- -c
|
||||
- log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
# Pass down internal port because it's set dynamically by other services
|
||||
- ${POSTGRES_PORT}:${POSTGRES_PORT}
|
||||
environment:
|
||||
POSTGRES_HOST: /var/run/postgresql
|
||||
PGPORT: ${POSTGRES_PORT}
|
||||
POSTGRES_PORT: ${POSTGRES_PORT}
|
||||
PGPASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
PGDATABASE: ${POSTGRES_DB}
|
||||
POSTGRES_DB: ${POSTGRES_DB}
|
||||
JWT_SECRET: ${JWT_SECRET}
|
||||
JWT_EXP: ${JWT_EXPIRY}
|
||||
volumes:
|
||||
- ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
|
||||
# Must be superuser to create event trigger
|
||||
- ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
|
||||
# Must be superuser to alter reserved role
|
||||
- ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
|
||||
# Initialize the database settings with JWT_SECRET and JWT_EXP
|
||||
- ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
|
||||
# PGDATA directory is persisted between restarts
|
||||
- ./volumes/db/data:/var/lib/postgresql/data:Z
|
||||
# Changes required for Analytics support
|
||||
- ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
|
||||
- ./scripts/tables.sql:/docker-entrypoint-initdb.d/seed.sql
|
||||
|
||||
vector:
|
||||
container_name: supabase-vector
|
||||
image: timberio/vector:0.28.1-alpine
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://vector:9001/health"
|
||||
]
|
||||
timeout: 5s
|
||||
interval: 5s
|
||||
retries: 3
|
||||
volumes:
|
||||
- ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
|
||||
- ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
|
||||
|
||||
command: [ "--config", "etc/vector/vector.yml" ]
|
supabase/.gitignore (vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
# Supabase
.branches
.temp
.env
supabase/config.toml (new file, 149 lines)

@@ -0,0 +1,149 @@
# A string used to distinguish different Supabase projects on the same host. Defaults to the
# working directory name when running `supabase init`.
project_id = "secondbrain"

[api]
enabled = true
# Port to use for the API URL.
port = 54321
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. public and storage are always included.
schemas = ["public", "storage", "graphql_public"]
# Extra schemas to add to the search_path of every request. public is always included.
extra_search_path = ["public", "extensions"]
# The maximum number of rows returned from a view, table, or stored procedure. Limits payload size
# for accidental or malicious requests.
max_rows = 1000

[db]
# Port to use for the local database URL.
port = 54322
# Port used by db diff command to initialize the shadow database.
shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW
# server_version;` on the remote database to check.
major_version = 15

[db.pooler]
enabled = false
# Port to use for the local connection pooler.
port = 54329
# Specifies when a server connection can be reused by other clients.
# Configure one of the supported pooler modes: `transaction`, `session`.
pool_mode = "transaction"
# How many server connections to allow per user/database pair.
default_pool_size = 20
# Maximum number of client connections allowed.
max_client_conn = 100

[realtime]
enabled = true
# Bind realtime via either IPv4 or IPv6. (default: IPv6)
# ip_version = "IPv6"

[studio]
enabled = true
# Port to use for Supabase Studio.
port = 54323
# External URL of the API server that frontend connects to.
api_url = "http://localhost"

# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface.
[inbucket]
enabled = true
# Port to use for the email testing server web interface.
port = 54324
# Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325
# pop3_port = 54326

[storage]
enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"

[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails.
site_url = "http://localhost:3000"
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["https://localhost:3000"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600
# If disabled, the refresh token will never expire.
enable_refresh_token_rotation = true
# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
# Requires enable_refresh_token_rotation = true.
refresh_token_reuse_interval = 10
# Allow/disallow new user signups to your project.
enable_signup = true

[auth.email]
# Allow/disallow new user signups via email to your project.
enable_signup = true
# If enabled, a user will be required to confirm any email change on both the old, and new email
# addresses. If disabled, only the new email is required to confirm.
double_confirm_changes = true
# If enabled, users need to confirm their email address before signing in.
enable_confirmations = false

# Uncomment to customize email template
# [auth.email.template.invite]
# subject = "You have been invited"
# content_path = "./supabase/templates/invite.html"

[auth.sms]
# Allow/disallow new user signups via SMS to your project.
enable_signup = true
# If enabled, users need to confirm their phone number before signing in.
enable_confirmations = false
# Template for sending OTP to users
template = "Your code is {{ .Code }} ."

# Use pre-defined map of phone number to OTP for testing.
[auth.sms.test_otp]
# 4152127777 = "123456"

# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
[auth.sms.twilio]
enabled = false
account_sid = ""
message_service_sid = ""
# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"

# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin`, `notion`, `twitch`,
# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
[auth.external.apple]
enabled = false
client_id = ""
# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
# Overrides the default auth redirectUrl.
redirect_uri = ""
# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
# or any other third-party OIDC providers.
url = ""

[analytics]
enabled = false
port = 54327
vector_port = 54328
# Configure one of the supported backends: `postgres`, `bigquery`.
backend = "postgres"

# Experimental features may be deprecated any time
[experimental]
# Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = "env(S3_HOST)"
# Configures S3 bucket region, eg. us-east-1
s3_region = "env(S3_REGION)"
# Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = "env(S3_ACCESS_KEY)"
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = "env(S3_SECRET_KEY)"
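With this config, `supabase start` exposes the local API on port 54321 and the database on port 54322. A minimal sketch of pointing code at that local stack, assuming the supabase-py client; the anon key is a placeholder for the value printed by `supabase start`.

# Hypothetical local-development snippet (key value is a placeholder)
from supabase import create_client

SUPABASE_URL = "http://localhost:54321"  # [api] port from config.toml
SUPABASE_ANON_KEY = "<anon key printed by `supabase start`>"

client = create_client(SUPABASE_URL, SUPABASE_ANON_KEY)
settings = client.table("user_settings").select("*").limit(1).execute()
print(settings.data)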
supabase/migrations/20240103173626_init.sql (new file, 2102 lines; diff suppressed because it is too large)

supabase/migrations/20240103175048_prod.sql (new file, 205 lines)
@@ -0,0 +1,205 @@
create sequence "public"."documents_id_seq";

drop function if exists "public"."get_premium_user"(input_email text);

drop function if exists "public"."update_max_brains"();

drop function if exists "public"."update_user_settings"();

drop function if exists "public"."match_summaries"(query_embedding vector, match_count integer, match_threshold double precision);

alter table "public"."vectors" drop constraint "vectors_pkey1";

drop index if exists "public"."vectors_pkey1";

create table "public"."documents" (
    "id" bigint not null default nextval('documents_id_seq'::regclass),
    "content" text,
    "metadata" jsonb,
    "embedding" vector(1536)
);

alter table "public"."brains" drop column "retrieval_algorithm";

alter table "public"."brains" add column "openai_api_key" text;

alter table "public"."brains" alter column "status" set default 'private'::text;

alter table "public"."brains_users" alter column "default_brain" set default false;

alter table "public"."brains_vectors" drop column "rights";

alter table "public"."user_settings" alter column "max_brain_size" set default 50000000;

alter table "public"."vectors" alter column "id" drop default;

alter sequence "public"."documents_id_seq" owned by "public"."documents"."id";

CREATE INDEX brains_vectors_brain_id_idx ON public.brains_vectors USING btree (brain_id);

CREATE INDEX brains_vectors_vector_id_idx ON public.brains_vectors USING btree (vector_id);

CREATE UNIQUE INDEX documents_pkey ON public.documents USING btree (id);

CREATE INDEX idx_brains_vectors_vector_id ON public.brains_vectors USING btree (vector_id);

CREATE INDEX idx_vectors_id ON public.vectors USING btree (id);

CREATE INDEX vectors_file_sha1_idx ON public.vectors USING btree (file_sha1);

CREATE INDEX vectors_id_idx ON public.vectors USING btree (id);

CREATE UNIQUE INDEX vectors_new_pkey ON public.vectors USING btree (id);

alter table "public"."documents" add constraint "documents_pkey" PRIMARY KEY using index "documents_pkey";

alter table "public"."vectors" add constraint "vectors_new_pkey" PRIMARY KEY using index "vectors_new_pkey";

alter table "public"."api_keys" add constraint "api_keys_user_id_fkey" FOREIGN KEY (user_id) REFERENCES auth.users(id) not valid;

alter table "public"."api_keys" validate constraint "api_keys_user_id_fkey";

alter table "public"."brains_vectors" add constraint "brains_vectors_vector_id_fkey" FOREIGN KEY (vector_id) REFERENCES vectors(id) not valid;

alter table "public"."brains_vectors" validate constraint "brains_vectors_vector_id_fkey";

alter table "public"."knowledge_vectors" add constraint "knowledge_vectors_vector_id_fkey" FOREIGN KEY (vector_id) REFERENCES vectors(id) not valid;

alter table "public"."knowledge_vectors" validate constraint "knowledge_vectors_vector_id_fkey";

alter table "public"."summaries" add constraint "summaries_document_id_fkey" FOREIGN KEY (document_id) REFERENCES vectors(id) not valid;

alter table "public"."summaries" validate constraint "summaries_document_id_fkey";

set check_function_bodies = off;

CREATE OR REPLACE FUNCTION public.match_documents(query_embedding vector, match_count integer)
 RETURNS TABLE(id bigint, content text, metadata jsonb, similarity double precision)
 LANGUAGE plpgsql
AS $function$
#variable_conflict use_column
begin
  return query
  select
    id,
    content,
    metadata,
    1 - (documents.embedding <=> query_embedding) as similarity
  from documents
  order by documents.embedding <=> query_embedding
  limit match_count;
end;
$function$
;

CREATE OR REPLACE FUNCTION public.match_summaries(query_embedding vector, match_count integer, match_threshold double precision)
 RETURNS TABLE(id bigint, document_id bigint, content text, metadata jsonb, embedding vector, similarity double precision)
 LANGUAGE plpgsql
AS $function$
# variable_conflict use_column
BEGIN
  RETURN query
  SELECT
    id,
    document_id,
    content,
    metadata,
    embedding,
    1 - (summaries.embedding <=> query_embedding) AS similarity
  FROM
    summaries
  WHERE 1 - (summaries.embedding <=> query_embedding) > match_threshold
  ORDER BY
    summaries.embedding <=> query_embedding
  LIMIT match_count;
END;
$function$
;

CREATE OR REPLACE FUNCTION public.update_max_brains_theodo()
 RETURNS trigger
 LANGUAGE plpgsql
 SECURITY DEFINER
AS $function$
DECLARE
  userEmail TEXT;
  allowedDomains TEXT[] := ARRAY['%@theodo.fr', '%@theodo.com', '%@theodo.co.uk', '%@bam.tech', '%@padok.fr', '%@aleios.com', '%@sicara.com', '%@hokla.com', '%@sipios.com'];
BEGIN
  SELECT email INTO userEmail FROM auth.users WHERE id = NEW.user_id;

  IF userEmail LIKE ANY(allowedDomains) THEN
    -- Ensure the models column is initialized as an array if null
    IF NEW.models IS NULL THEN
      NEW.models := '[]'::jsonb;
    END IF;

    -- Add gpt-4 if not present
    IF NOT NEW.models ? 'gpt-4' THEN
      NEW.models := NEW.models || '["gpt-4"]'::jsonb;
    END IF;

    -- Add gpt-3.5-turbo if not present
    IF NOT NEW.models ? 'gpt-3.5-turbo-1106' THEN
      NEW.models := NEW.models || '["gpt-3.5-turbo"]'::jsonb;
    END IF;

    UPDATE user_settings
    SET
      max_brains = 30,
      max_brain_size = 100000000,
      daily_chat_credit = 200,
      models = NEW.models
    WHERE user_id = NEW.user_id;
  END IF;

  RETURN NULL; -- for AFTER triggers, the return value is ignored
END;
$function$
;

grant delete on table "public"."documents" to "anon";

grant insert on table "public"."documents" to "anon";

grant references on table "public"."documents" to "anon";

grant select on table "public"."documents" to "anon";

grant trigger on table "public"."documents" to "anon";

grant truncate on table "public"."documents" to "anon";

grant update on table "public"."documents" to "anon";

grant delete on table "public"."documents" to "authenticated";

grant insert on table "public"."documents" to "authenticated";

grant references on table "public"."documents" to "authenticated";

grant select on table "public"."documents" to "authenticated";

grant trigger on table "public"."documents" to "authenticated";

grant truncate on table "public"."documents" to "authenticated";

grant update on table "public"."documents" to "authenticated";

grant delete on table "public"."documents" to "service_role";

grant insert on table "public"."documents" to "service_role";

grant references on table "public"."documents" to "service_role";

grant select on table "public"."documents" to "service_role";

grant trigger on table "public"."documents" to "service_role";

grant truncate on table "public"."documents" to "service_role";

grant update on table "public"."documents" to "service_role";

CREATE TRIGGER update_max_brains_theodo_trigger AFTER INSERT ON public.user_settings FOR EACH ROW EXECUTE FUNCTION update_max_brains_theodo();
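The match_documents function added above is the vector-similarity entry point for retrieval. A minimal sketch of invoking it through the Supabase RPC interface; the embedding values are placeholders and would normally come from the same embedding model used to populate the documents table.

# Hypothetical retrieval sketch (embedding values and key are placeholders)
from supabase import create_client

client = create_client("http://localhost:54321", "<anon or service key>")
query_embedding = [0.0] * 1536  # must match the vector(1536) column dimension

response = client.rpc(
    "match_documents",
    {"query_embedding": query_embedding, "match_count": 5},
).execute()

for row in response.data:
    print(row["similarity"], row["content"][:80])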
supabase/migrations/20240103181249_premium.sql (new file, 7 lines)

@@ -0,0 +1,7 @@
alter table "public"."user_settings" add column "is_premium" boolean not null default false;

alter table "public"."user_settings" alter column "max_brain_size" set not null;

alter table "public"."user_settings" alter column "max_brain_size" set data type bigint using "max_brain_size"::bigint;
supabase/migrations/20240103181925_cleanup.sql (new file, 3 lines)

@@ -0,0 +1,3 @@
drop function if exists "public"."match_summaries"(query_embedding vector, match_count integer, match_threshold double precision);
supabase/migrations/20240103185550_upgrade.sql (new file, 3 lines)

@@ -0,0 +1,3 @@
alter extension "wrappers" update to '0.2.0';
30
supabase/migrations/20240103193921_stripe_customers.sql
Normal file
30
supabase/migrations/20240103193921_stripe_customers.sql
Normal file
@ -0,0 +1,30 @@
|
||||
create foreign table public.subscriptions (
|
||||
id text,
|
||||
customer text,
|
||||
currency text,
|
||||
current_period_start timestamp,
|
||||
current_period_end timestamp,
|
||||
attrs jsonb
|
||||
)
|
||||
server stripe_server
|
||||
options (
|
||||
object 'subscriptions',
|
||||
rowid_column 'id'
|
||||
);

create foreign table public.products (
  id text,
  name text,
  active bool,
  default_price text,
  description text,
  created timestamp,
  updated timestamp,
  attrs jsonb
)
server stripe_server
options (
  object 'products',
  rowid_column 'id'
);
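Both foreign tables point at a stripe_server that is not defined in this file; it is presumably created in a separate, untracked migration. A minimal sketch of what that definition would look like with the Supabase wrappers Stripe FDW follows (names taken from the wrappers documentation; the API key is a placeholder and a real key should never be committed):

-- Sketch only: assumed contents of the untracked private migration.
create foreign data wrapper stripe_wrapper
  handler stripe_fdw_handler
  validator stripe_fdw_validator;

create server stripe_server
  foreign data wrapper stripe_wrapper
  options (
    api_key '<stripe-secret-key-placeholder>'
  );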
3
supabase/migrations/20240103194255_api.sql
Normal file
@ -0,0 +1,3 @@
alter table "public"."user_settings" add column "API_ACCESS" boolean not null default false;
66
supabase/migrations/20240103204741_product_to_features.sql
Normal file
@ -0,0 +1,66 @@
create table "public"."product_to_features" (
    "id" bigint generated by default as identity not null,
    "models" jsonb default '["gpt-3.5-turbo-1106"]'::jsonb,
    "daily_chat_credit" integer not null default 20,
    "max_brains" integer not null,
    "max_brain_size" bigint not null default '50000000'::bigint,
    "api_access" boolean not null default false,
    "stripe_product_id" text
);

alter table "public"."user_settings" drop column "API_ACCESS";

alter table "public"."user_settings" add column "api_access" boolean not null default false;

CREATE UNIQUE INDEX product_to_features_pkey ON public.product_to_features USING btree (id);

alter table "public"."product_to_features" add constraint "product_to_features_pkey" PRIMARY KEY using index "product_to_features_pkey";

alter table "public"."product_to_features" add constraint "product_to_features_max_brains_check" CHECK ((max_brains > 0)) not valid;

alter table "public"."product_to_features" validate constraint "product_to_features_max_brains_check";

grant delete on table "public"."product_to_features" to "anon";
grant insert on table "public"."product_to_features" to "anon";
grant references on table "public"."product_to_features" to "anon";
grant select on table "public"."product_to_features" to "anon";
grant trigger on table "public"."product_to_features" to "anon";
grant truncate on table "public"."product_to_features" to "anon";
grant update on table "public"."product_to_features" to "anon";

grant delete on table "public"."product_to_features" to "authenticated";
grant insert on table "public"."product_to_features" to "authenticated";
grant references on table "public"."product_to_features" to "authenticated";
grant select on table "public"."product_to_features" to "authenticated";
grant trigger on table "public"."product_to_features" to "authenticated";
grant truncate on table "public"."product_to_features" to "authenticated";
grant update on table "public"."product_to_features" to "authenticated";

grant delete on table "public"."product_to_features" to "service_role";
grant insert on table "public"."product_to_features" to "service_role";
grant references on table "public"."product_to_features" to "service_role";
grant select on table "public"."product_to_features" to "service_role";
grant trigger on table "public"."product_to_features" to "service_role";
grant truncate on table "public"."product_to_features" to "service_role";
grant update on table "public"."product_to_features" to "service_role";
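Not part of the migration: a hypothetical row illustrating how a Stripe product id could be mapped to a set of feature limits. Every value below, including the product id, is made up for illustration.

-- Hypothetical example row (placeholder values only).
insert into public.product_to_features
  (models, daily_chat_credit, max_brains, max_brain_size, api_access, stripe_product_id)
values
  ('["gpt-3.5-turbo-1106", "gpt-4"]'::jsonb, 100, 12, 100000000, true, 'prod_placeholder');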
0
supabase/schema.sql
Normal file
305
supabase/seed.sql
Normal file
@ -0,0 +1,305 @@
SET session_replication_role = replica;

--
-- PostgreSQL database dump
--

-- Dumped from database version 15.1 (Ubuntu 15.1-1.pgdg20.04+1)
-- Dumped by pg_dump version 15.5 (Ubuntu 15.5-1.pgdg20.04+1)

SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;

--
-- Data for Name: audit_log_entries; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

INSERT INTO "auth"."audit_log_entries" ("instance_id", "id", "payload", "created_at", "ip_address") VALUES
('00000000-0000-0000-0000-000000000000', '479f1b0e-4e73-4a73-9b0a-349cfc333215', '{"action":"user_signedup","actor_id":"00000000-0000-0000-0000-000000000000","actor_username":"service_role","actor_via_sso":false,"log_type":"team","traits":{"user_email":"admin@quivr.app","user_id":"d777f5d2-1494-460c-82b4-70f445b6344b","user_phone":""}}', '2024-01-03 17:52:45.895193+00', ''),
('00000000-0000-0000-0000-000000000000', 'b21c9ed5-6a11-4da6-b0ba-86a84ae01d9d', '{"action":"login","actor_id":"d777f5d2-1494-460c-82b4-70f445b6344b","actor_username":"admin@quivr.app","actor_via_sso":false,"log_type":"account","traits":{"provider":"email"}}', '2024-01-03 17:57:52.722055+00', ''),
('00000000-0000-0000-0000-000000000000', 'f7fbe861-c477-483e-a74a-8dcb9f2df8c5', '{"action":"user_signedup","actor_id":"00000000-0000-0000-0000-000000000000","actor_username":"service_role","actor_via_sso":false,"log_type":"team","traits":{"user_email":"admin@quivr.app","user_id":"bad271c8-973a-4dcc-8e87-1de818ea1234","user_phone":""}}', '2024-01-03 17:59:11.223649+00', '');

--
-- Data for Name: flow_state; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: users; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

INSERT INTO "auth"."users" ("instance_id", "id", "aud", "role", "email", "encrypted_password", "email_confirmed_at", "invited_at", "confirmation_token", "confirmation_sent_at", "recovery_token", "recovery_sent_at", "email_change_token_new", "email_change", "email_change_sent_at", "last_sign_in_at", "raw_app_meta_data", "raw_user_meta_data", "is_super_admin", "created_at", "updated_at", "phone", "phone_confirmed_at", "phone_change", "phone_change_token", "phone_change_sent_at", "email_change_token_current", "email_change_confirm_status", "banned_until", "reauthentication_token", "reauthentication_sent_at", "is_sso_user", "deleted_at") VALUES
('00000000-0000-0000-0000-000000000000', 'bad271c8-973a-4dcc-8e87-1de818ea1234', 'authenticated', 'authenticated', 'admin@quivr.app', '$2a$10$fo99ZlLdOex9QJy5cMN8OuQD2EBylfB1dPCfdLeXniDr6a6K1jOEu', '2024-01-03 17:59:11.22809+00', NULL, '', NULL, '', NULL, '', '', NULL, NULL, '{"provider": "email", "providers": ["email"]}', '{}', NULL, '2024-01-03 17:59:11.212675+00', '2024-01-03 17:59:11.228261+00', NULL, NULL, '', '', NULL, '', 0, NULL, '', NULL, false, NULL);

--
-- Data for Name: identities; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

INSERT INTO "auth"."identities" ("provider_id", "user_id", "identity_data", "provider", "last_sign_in_at", "created_at", "updated_at", "id") VALUES
('bad271c8-973a-4dcc-8e87-1de818ea1234', 'bad271c8-973a-4dcc-8e87-1de818ea1234', '{"sub": "bad271c8-973a-4dcc-8e87-1de818ea1234", "email": "admin@quivr.app", "email_verified": false, "phone_verified": false}', 'email', '2024-01-03 17:59:11.222255+00', '2024-01-03 17:59:11.222367+00', '2024-01-03 17:59:11.222367+00', 'b22ef918-7d7c-4d30-b51a-0ac15a25ae0c');

--
-- Data for Name: instances; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: sessions; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: mfa_amr_claims; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: mfa_factors; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: mfa_challenges; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: refresh_tokens; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: sso_providers; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: saml_providers; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: saml_relay_states; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: sso_domains; Type: TABLE DATA; Schema: auth; Owner: supabase_auth_admin
--

--
-- Data for Name: prompts; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: brains; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: api_brain_definition; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: api_keys; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: brain_subscription_invitations; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: brains_users; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: vectors; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: brains_vectors; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: chats; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: chat_history; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: composite_brain_connections; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: documents; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: knowledge; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: knowledge_vectors; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: migrations; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: notifications; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: onboardings; Type: TABLE DATA; Schema: public; Owner: postgres
--

INSERT INTO "public"."onboardings" ("user_id", "onboarding_a", "onboarding_b1", "onboarding_b2", "onboarding_b3", "creation_time") VALUES
('bad271c8-973a-4dcc-8e87-1de818ea1234', true, true, true, true, '2024-01-03 17:59:11.212049');

--
-- Data for Name: stats; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: summaries; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: user_daily_usage; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: user_identity; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: user_settings; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: users; Type: TABLE DATA; Schema: public; Owner: postgres
--

INSERT INTO "public"."users" ("id", "email") VALUES
('bad271c8-973a-4dcc-8e87-1de818ea1234', 'admin@quivr.app');

--
-- Data for Name: users_old; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Data for Name: vectors_old; Type: TABLE DATA; Schema: public; Owner: postgres
--

--
-- Name: refresh_tokens_id_seq; Type: SEQUENCE SET; Schema: auth; Owner: supabase_auth_admin
--

SELECT pg_catalog.setval('"auth"."refresh_tokens_id_seq"', 1, true);

--
-- Name: documents_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
--

SELECT pg_catalog.setval('"public"."documents_id_seq"', 1, false);

--
-- Name: stats_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
--

SELECT pg_catalog.setval('"public"."stats_id_seq"', 1, false);

--
-- Name: summaries_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
--

SELECT pg_catalog.setval('"public"."summaries_id_seq"', 1, false);

--
-- Name: vectors_id_seq; Type: SEQUENCE SET; Schema: public; Owner: postgres
--

SELECT pg_catalog.setval('"public"."vectors_id_seq"', 1, false);

--
-- PostgreSQL database dump complete
--

RESET ALL;
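Aside, not part of the seed file: once the seed has been applied (for example via the Supabase CLI's database reset), the bundled admin account can be checked with a simple query. The email comes from the inserts above.

-- Confirm the seeded admin user exists in both the auth and public schemas.
select u.id, u.email
from auth.users u
join public.users pu on pu.id = u.id
where u.email = 'admin@quivr.app';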