mirror of https://github.com/StanGirard/quivr.git
synced 2024-11-25 19:07:43 +03:00

feat: 🎸 supabase

migrated to supabase cli

This commit is contained in:
parent e8db29f8af
commit b2d1d10212
.env.example (123 lines changed)
@@ -4,7 +4,7 @@
OPENAI_API_KEY=CHANGE_ME

#LOCAL
#OLLAMA_API_BASE_URL=http://host.docker.internal:11434 # Uncomment to activate ollama. This is the local url for the ollama api
# OLLAMA_API_BASE_URL=http://host.docker.internal:11434 # Uncomment to activate ollama. This is the local url for the ollama api


@@ -17,8 +17,8 @@ OPENAI_API_KEY=CHANGE_ME

NEXT_PUBLIC_ENV=local
NEXT_PUBLIC_BACKEND_URL=http://localhost:5050
NEXT_PUBLIC_SUPABASE_URL=http://localhost:8000
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
NEXT_PUBLIC_SUPABASE_URL=http://localhost:54321
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6ImFub24iLCJleHAiOjE5ODM4MTI5OTZ9.CRXP1A7WOeoJeXxjNni43kdQwgnWNReilDMblYTn_I0
NEXT_PUBLIC_CMS_URL=https://cms.quivr.app
NEXT_PUBLIC_FRONTEND_URL=http://localhost:*

@@ -28,16 +28,14 @@ NEXT_PUBLIC_FRONTEND_URL=http://localhost:*
# BACKEND
########

SUPABASE_URL=http://kong:8000
SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
SUPABASE_URL=http://host.docker.internal:54321
SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU
PG_DATABASE_URL=notimplementedyet
ANTHROPIC_API_KEY=null
JWT_SECRET_KEY=your-super-secret-jwt-token-with-at-least-32-characters-long
JWT_SECRET_KEY=super-secret-jwt-token-with-at-least-32-characters-long
AUTHENTICATE=true
GOOGLE_APPLICATION_CREDENTIALS=<ignore-me>
GOOGLE_CLOUD_PROJECT=<ignore-me>
CELERY_BROKER_URL=redis://redis:6379/0
CELERY_BROKER_QUEUE_NAME=quivr-preview.fifo
CELEBRY_BROKER_QUEUE_NAME=quivr-preview.fifo


#RESEND
@@ -54,110 +52,3 @@ PREMIUM_MAX_BRAIN_SIZE=10000000
PREMIUM_DAILY_CHAT_CREDIT=100


###### SUPABASE CONFIGURATION ######

############
# Secrets
# YOU MUST CHANGE THESE BEFORE GOING INTO PRODUCTION
############

POSTGRES_PASSWORD=your-super-secret-and-long-postgres-password
JWT_SECRET=your-super-secret-jwt-token-with-at-least-32-characters-long
ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
SERVICE_ROLE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJzZXJ2aWNlX3JvbGUiLAogICAgImlzcyI6ICJzdXBhYmFzZS1kZW1vIiwKICAgICJpYXQiOiAxNjQxNzY5MjAwLAogICAgImV4cCI6IDE3OTk1MzU2MDAKfQ.DaYlNEoUrrEn2Ig7tqibS-PHK5vgusbcbo7X36XVt4Q
DASHBOARD_USERNAME=admin
DASHBOARD_PASSWORD=admin

############
# Database - You can change these to any PostgreSQL database that has logical replication enabled.
############

POSTGRES_HOST=db
POSTGRES_DB=postgres
POSTGRES_PORT=5432
# default user is postgres

############
# API Proxy - Configuration for the Kong Reverse proxy.
############

KONG_HTTP_PORT=8000
KONG_HTTPS_PORT=8443


############
# API - Configuration for PostgREST.
############

PGRST_DB_SCHEMAS=public,storage,graphql_public


############
# Auth - Configuration for the GoTrue authentication server.
############

## General
SITE_URL=http://localhost:3001
ADDITIONAL_REDIRECT_URLS=
JWT_EXPIRY=3600
DISABLE_SIGNUP=false
API_EXTERNAL_URL=http://localhost:8000

## Mailer Config
MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"
MAILER_URLPATHS_INVITE="/auth/v1/verify"
MAILER_URLPATHS_RECOVERY="/auth/v1/verify"
MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"

## Email auth
ENABLE_EMAIL_SIGNUP=true
ENABLE_EMAIL_AUTOCONFIRM=false
SMTP_ADMIN_EMAIL=admin@example.com
SMTP_HOST=supabase-mail
SMTP_PORT=2500
SMTP_USER=fake_mail_user
SMTP_PASS=fake_mail_password
SMTP_SENDER_NAME=fake_sender

## Phone auth
ENABLE_PHONE_SIGNUP=true
ENABLE_PHONE_AUTOCONFIRM=true


############
# Studio - Configuration for the Dashboard
############

STUDIO_DEFAULT_ORGANIZATION=Default Organization
STUDIO_DEFAULT_PROJECT=Default Project

STUDIO_PORT=3001
# replace if you intend to use Studio outside of localhost
SUPABASE_PUBLIC_URL=http://localhost:8000

# Enable webp support
IMGPROXY_ENABLE_WEBP_DETECTION=true

############
# Functions - Configuration for Functions
############
# NOTE: VERIFY_JWT applies to all functions. Per-function VERIFY_JWT is not supported yet.
FUNCTIONS_VERIFY_JWT=false

############
# Logs - Configuration for Logflare
# Please refer to https://supabase.com/docs/reference/self-hosting-analytics/introduction
############

LOGFLARE_LOGGER_BACKEND_API_KEY=your-super-secret-and-long-logflare-key

# Change vector.toml sinks to reflect this change
LOGFLARE_API_KEY=your-super-secret-and-long-logflare-key

# Docker socket location - this value will differ depending on your OS
DOCKER_SOCKET_LOCATION=/var/run/docker.sock

# Google Cloud Project details
GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID
GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER
README.md (35 lines changed)
@@ -44,11 +44,19 @@ Ensure you have the following installed:

You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6dZJN48).

- **Step 0**: Supabase CLI

Follow the instructions [here](https://supabase.com/docs/guides/cli/getting-started) to install the Supabase CLI.

```bash
supabase -v # Check that the installation worked
```

- **Step 1**: Clone the repository:

```bash
git clone https://github.com/StanGirard/Quivr.git && cd Quivr
git checkout v0.0.152
```

- **Step 2**: Copy the `.env.example` files

@@ -67,18 +75,13 @@ You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6

You just need to update the `OPENAI_API_KEY` variable in the `.env` file. You can get your API key [here](https://platform.openai.com/api-keys); you need to create an account first and add your credit card information. Don't worry: you won't be charged unless you use the API. You can find more information about the pricing [here](https://openai.com/pricing/).
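For reference, the finished line in `.env` looks like this (the key below is a placeholder, not a real one):

```bash
# .env: replace the placeholder with the key from platform.openai.com
OPENAI_API_KEY=sk-your-key-here
```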

> Want to use [Ollama.ai](https://ollama.ai) instead?
> Uncomment the following lines in the `.env` file:
> OLLAMA_API_BASE_URL
> Run the following command to start Ollama: `ollama run llama2`
> You can find more information about Ollama [here](https://ollama.ai/).
> Don't want to use OpenAI and want to use Ollama instead for a completely private experience? You can find the instructions [here](https://docs.quivr.app/developers/contribution/llm/ollama).

- **Step 4**: Launch the project

```bash
docker compose pull
docker compose up --build # if OPENAI
# docker compose -f docker-compose-ollama.yml up --build # Only if using Ollama. You need to run `ollama run llama2` first.
docker compose up
```

If you have a Mac, go to Docker Desktop > Settings > General and check that the "file sharing implementation" is set to `VirtioFS`.

@@ -87,12 +90,12 @@ You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6

- **Step 5**: Login to the app

Connect to the supabase database at [http://localhost:8000/project/default/auth/users](http://localhost:8000/project/default/auth/users) with the following credentials: admin/admin in order to create new users. Auto-confirm the email.

You can now sign in to the app with your new user. You can access the app at [http://localhost:3000/login](http://localhost:3000/login).
You can now sign in to the app with `admin@quivr.app` & `admin`. You can access the app at [http://localhost:3000/login](http://localhost:3000/login).
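As an alternative sketch (assuming the default local service-role key from `.env.example`), you can also create a pre-confirmed user directly against the GoTrue admin endpoint instead of clicking through the dashboard:

```bash
# Create a confirmed user via the Supabase auth admin API (illustrative only).
# SERVICE_KEY is the SUPABASE_SERVICE_KEY value from your .env file.
SERVICE_KEY="<your SUPABASE_SERVICE_KEY>"
curl -X POST 'http://localhost:54321/auth/v1/admin/users' \
  -H "apikey: $SERVICE_KEY" \
  -H "Authorization: Bearer $SERVICE_KEY" \
  -H "Content-Type: application/json" \
  -d '{"email": "admin@quivr.app", "password": "admin", "email_confirm": true}'
```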

You can access Quivr backend API at [http://localhost:5050/docs](http://localhost:5050/docs)

You can access supabase at [http://localhost:54323](http://localhost:54323)

## Updating Quivr 🚀

- **Step 1**: Pull the latest changes

@@ -101,18 +104,12 @@ You can find the installation video [here](https://www.youtube.com/watch?v=cXBa6
git pull
```

- **Step 2**: Use the `migration.sh` script to run the migration scripts
- **Step 2**: Update the migration

```bash
chmod +x migration.sh # You need to install Gum & postgresql (brew install gum for example)
./migration.sh
# Select 2) Run migrations
supabase migration up
```
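To sanity-check what was applied, the Supabase CLI can list migrations (assuming a reasonably recent CLI version):

```bash
supabase migration list   # shows local and applied migrations
# supabase db reset       # optional: rebuild the local database and re-run every migration
```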

Alternatively, you can run the script on the Supabase database via the web interface (SQL Editor -> `New query` -> paste the script -> `Run`).

All the scripts can be found in the [scripts](scripts/) folder.

## Contributors ✨

@@ -1,505 +0,0 @@
version: "3.8"

services:
  frontend:
    pull_policy: never
    build:
      context: frontend
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_ENV=local
        - NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL}
        - NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL}
        - NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY}
        - NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL}
        - NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL}
    container_name: web
    depends_on:
      - backend-core
    restart: always
    ports:
      - 3000:3000

  backend-core:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: backend-core
    command:
      - "uvicorn"
      - "main:app"
      - "--host"
      - "0.0.0.0"
      - "--port"
      - "5050"
      - "--workers"
      - "1"
    restart: always
    depends_on:
      db:
        condition: service_healthy
      kong:
        condition: service_healthy
    ports:
      - 5050:5050

  redis:
    image: redis:latest@sha256:a7cee7c8178ff9b5297cb109e6240f5072cdaaafd775ce6b586c3c704b06458e
    container_name: redis
    restart: always
    ports:
      - 6379:6379

  worker:
    pull_policy: never
    image: backend-base
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: worker
    command: celery -A celery_worker worker -l info
    restart: always
    depends_on:
      - redis

  beat:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: beat
    command: celery -A celery_worker beat -l info
    restart: always
    depends_on:
      - redis

  flower:
    image: backend-base
    pull_policy: never
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: flower
    command: celery -A celery_worker flower -l info --port=5555
    restart: always
    depends_on:
      - redis
      - worker
      - beat
    ports:
      - 5555:5555

  studio:
    container_name: supabase-studio
    image: supabase/studio:20231123-64a766a@sha256:393669f03b739fa5f196a5a0254442d224c439acff3ebe738cb27847d5fc207d
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}

      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}

      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1@sha256:1b53405d8680a09d6f44494b7990bf7da2ea43f84a258c59717d4539abf09f6d
    restart: unless-stopped
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.99.0@sha256:b503f1fac70544bb5a43d4507e6a0842dc119e4937647b1e7ade34eae7dcdbf0
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v11.2.2@sha256:23b2dabfc7f3f1a6c11b71f2ce277191659da38ab76042a30c3c8d6c4e07b446
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: "postgrest"

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.25.35@sha256:634a59e298fbd012523fbc874f74a18f2ae8523b11c646a2b4052b9b943c2939
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "bash",
          "-c",
          "printf \\0 > /dev/tcp/localhost/4000"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      FLY_ALLOC_ID: fly123
      FLY_APP_NAME: realtime
      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
      ERL_AFLAGS: -proto_dist inet_tcp
      ENABLE_TAILSCALE: "false"
      DNS_NODES: "''"
    command: >
      sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"

  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v0.43.11@sha256:2cd146f1af313019f0d03e18383e46578e7cb118bc5baa1fb7551ab56c0cb60e
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0@sha256:0facd355d50f3be665ebe674486f2b2e9cdaebd3f74404acd9b7fece2f661435
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.68.0@sha256:31a107dcfe9257792b49f560a5527d5fbd7128b986acad5431b269bac4d17f12
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.22.4@sha256:4e02aacd0c8fa7ab103d718e4a0ece11d7d8a9446299f523d8ac5d092fec4908
    restart: unless-stopped
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    command:
      - start
      - --main-service
      - /home/deno/functions/main

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.4.0@sha256:e693c787ffe1ae17b6e4e920a3cdd212416d3e1f97e1bd7cb5b67de0abbb0264
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:4000/health" ]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: ${POSTGRES_DB}
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      RELEASE_COOKIE: cookie

      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true

      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
    ports:
      - 4000:4000
    entrypoint: |
      sh -c `cat <<'EOF' > run.sh && sh run.sh
      ./logflare eval Logflare.Release.migrate
      ./logflare start --sname logflare
      EOF
      `

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.1.0.136@sha256:fb8387fddbb6dd2fc8af512bfb721a02786e0d71ded364ed66d096574e70d81e
    healthcheck:
      test: pg_isready -U postgres -h localhost
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
    restart: unless-stopped
    ports:
      # Pass down internal port because it's set dynamically by other services
      - ${POSTGRES_PORT}:${POSTGRES_PORT}
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      - ./scripts/tables-ollama.sql:/docker-entrypoint-initdb.d/seed.sql

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine@sha256:4bc04aca94a44f04b427a490f346e7397ef7ce61fe589d718f744f7d92cb5c80
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro

    command: [ "--config", "etc/vector/vector.yml" ]
@@ -20,7 +20,6 @@ services:
    restart: always
    ports:
      - 3000:3000

  backend-core:
    image: stangirard/quivr-backend-prebuilt:latest
@@ -32,8 +31,8 @@ services:
      dockerfile: Dockerfile
    container_name: backend-core
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:5050/healthz" ]
    command:
      test: ["CMD", "curl", "http://localhost:5050/healthz"]
    command:
      - "uvicorn"
      - "main:app"
      - "--reload"
@@ -43,14 +42,9 @@ services:
      - "5050"
      - "--workers"
      - "1"
    restart: always
    restart: always
    volumes:
      - ./backend/:/code/
    depends_on:
      db:
        condition: service_healthy
      kong:
        condition: service_healthy
    ports:
      - 5050:5050

@@ -60,9 +54,6 @@ services:
    restart: always
    ports:
      - 6379:6379
    depends_on:
      db:
        condition: service_healthy

  worker:
    pull_policy: if_not_present
@@ -71,13 +62,12 @@ services:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
      dockerfile: Dockerfile
    container_name: worker
    command: celery -A celery_worker worker -l info
    restart: always
    depends_on:
      - redis
      - db

  beat:
    image: stangirard/quivr-backend-prebuilt:latest
@@ -86,7 +76,7 @@ services:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
      dockerfile: Dockerfile
    container_name: beat
    command: celery -A celery_worker beat -l info
    restart: always
@@ -100,7 +90,7 @@ services:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
      dockerfile: Dockerfile
    container_name: flower
    command: celery -A celery_worker flower -l info --port=5555
    restart: always
@@ -110,406 +100,3 @@ services:
      - beat
    ports:
      - 5555:5555

  studio:
    container_name: supabase-studio
    image: supabase/studio:20231123-64a766a@sha256:393669f03b739fa5f196a5a0254442d224c439acff3ebe738cb27847d5fc207d
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}

      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}

      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}

      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1@sha256:1b53405d8680a09d6f44494b7990bf7da2ea43f84a258c59717d4539abf09f6d
    restart: unless-stopped
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.99.0@sha256:b503f1fac70544bb5a43d4507e6a0842dc119e4937647b1e7ade34eae7dcdbf0
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v11.2.2@sha256:23b2dabfc7f3f1a6c11b71f2ce277191659da38ab76042a30c3c8d6c4e07b446
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: "postgrest"

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.25.35@sha256:634a59e298fbd012523fbc874f74a18f2ae8523b11c646a2b4052b9b943c2939
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "bash",
          "-c",
          "printf \\0 > /dev/tcp/localhost/4000"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      FLY_ALLOC_ID: fly123
      FLY_APP_NAME: realtime
      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
      ERL_AFLAGS: -proto_dist inet_tcp
      ENABLE_TAILSCALE: "false"
      DNS_NODES: "''"
    command: >
      sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"

  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v0.43.11@sha256:2cd146f1af313019f0d03e18383e46578e7cb118bc5baa1fb7551ab56c0cb60e
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0@sha256:0facd355d50f3be665ebe674486f2b2e9cdaebd3f74404acd9b7fece2f661435
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.68.0@sha256:31a107dcfe9257792b49f560a5527d5fbd7128b986acad5431b269bac4d17f12
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.22.4@sha256:4e02aacd0c8fa7ab103d718e4a0ece11d7d8a9446299f523d8ac5d092fec4908
    restart: unless-stopped
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    command:
      - start
      - --main-service
      - /home/deno/functions/main

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.4.0@sha256:e693c787ffe1ae17b6e4e920a3cdd212416d3e1f97e1bd7cb5b67de0abbb0264
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:4000/health" ]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: ${POSTGRES_DB}
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      RELEASE_COOKIE: cookie

      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true

      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
    ports:
      - 4000:4000
    entrypoint: |
      sh -c `cat <<'EOF' > run.sh && sh run.sh
      ./logflare eval Logflare.Release.migrate
      ./logflare start --sname logflare
      EOF
      `

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.1.0.136@sha256:fb8387fddbb6dd2fc8af512bfb721a02786e0d71ded364ed66d096574e70d81e
    healthcheck:
      test: pg_isready -U postgres -h localhost
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
    restart: unless-stopped
    ports:
      # Pass down internal port because it's set dynamically by other services
      - ${POSTGRES_PORT}:${POSTGRES_PORT}
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      - ./scripts/tables.sql:/docker-entrypoint-initdb.d/seed.sql

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine@sha256:4bc04aca94a44f04b427a490f346e7397ef7ce61fe589d718f744f7d92cb5c80
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro

    command: [ "--config", "etc/vector/vector.yml" ]
@@ -17,6 +17,13 @@ Then run the following command to run Ollama in the background:
ollama run llama2
```

### Run Migrations

```bash
mv supabase/local_20240107152745_ollama.sql supabase/20240107152745_ollama.sql
supabase db reset
```


## Add Ollama Model to Quivr

@@ -24,7 +31,7 @@ Now that you have your model running locally, you need to add it to Quivr.

In order to allow the user to choose Ollama, we need to add a new model to the Quivr backend.

Go to supabase and in the table `user_settings` either add by default or to your user the following value to the `models` column:
Go to [supabase](https://localhost:54323) and in the table `user_settings` either add by default or to your user the following value to the `models` column:

```json
[
@ -1,18 +0,0 @@
|
||||
-- Create the new table with 6 columns
|
||||
CREATE TABLE IF NOT EXISTS api_brain_definition (
|
||||
brain_id UUID REFERENCES brains(brain_id),
|
||||
method VARCHAR(255) CHECK (method IN ('GET', 'POST', 'PUT', 'DELETE')),
|
||||
url VARCHAR(255),
|
||||
params JSON,
|
||||
search_params JSON,
|
||||
secrets JSON
|
||||
);
|
||||
|
||||
-- Insert migration record if it doesn't exist
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231106071000_add_api_brain_definition_table'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231106071000_add_api_brain_definition_table'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,34 +0,0 @@
|
||||
-- Check if the ENUM type 'brain_type' already exists
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'brain_type_enum') THEN
|
||||
-- Create the ENUM type 'brain_type' if it doesn't exist
|
||||
CREATE TYPE brain_type_enum AS ENUM ('doc', 'api');
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Add a column 'brain_type' to the 'brains' table using the 'brain_type' ENUM type
|
||||
BEGIN;
|
||||
|
||||
-- Add a column 'brain_type' to the 'brains' table as the 'brain_type' ENUM type with a default value 'doc'
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'brains'
|
||||
AND column_name = 'brain_type'
|
||||
) THEN
|
||||
ALTER TABLE brains ADD COLUMN brain_type brain_type_enum DEFAULT 'doc';
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
-- Insert a migration record if it doesn't exist
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231106110000_add_field_brain_type_to_brain_table'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231106110000_add_field_brain_type_to_brain_table'
|
||||
);
|
||||
|
||||
-- Commit the changes
|
||||
COMMIT;
|
@ -1,53 +0,0 @@
|
||||
CREATE OR REPLACE FUNCTION insert_secret(name text, secret text)
|
||||
returns uuid
|
||||
language plpgsql
|
||||
security definer
|
||||
set search_path = public
|
||||
as $$
|
||||
begin
|
||||
return vault.create_secret(secret, name);
|
||||
end;
|
||||
$$;
|
||||
|
||||
|
||||
create or replace function read_secret(secret_name text)
|
||||
returns text
|
||||
language plpgsql
|
||||
security definer set search_path = public
|
||||
as $$
|
||||
declare
|
||||
secret text;
|
||||
begin
|
||||
select decrypted_secret from vault.decrypted_secrets where name =
|
||||
secret_name into secret;
|
||||
return secret;
|
||||
end;
|
||||
$$;
|
||||
|
||||
create or replace function delete_secret(secret_name text)
|
||||
returns text
|
||||
language plpgsql
|
||||
security definer set search_path = public
|
||||
as $$
|
||||
declare
|
||||
deleted_rows int;
|
||||
begin
|
||||
delete from vault.decrypted_secrets where name = secret_name;
|
||||
get diagnostics deleted_rows = row_count;
|
||||
if deleted_rows = 0 then
|
||||
return false;
|
||||
else
|
||||
return true;
|
||||
end if;
|
||||
end;
|
||||
$$;
|
||||
|
||||
-- Insert a migration record if it doesn't exist
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231107104700_setup_vault'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231107104700_setup_vault'
|
||||
);
|
||||
|
||||
-- Commit the changes
|
||||
COMMIT;
|
@ -1,10 +0,0 @@
|
||||
DROP FUNCTION IF EXISTS public.get_user_id_by_user_email(text);
|
||||
|
||||
-- Insert migration record if it doesn't exist
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231114162700_drop_get_user_email_by_user_id'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231114162700_drop_get_user_email_by_user_id'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,17 +0,0 @@
|
||||
CREATE OR REPLACE FUNCTION public.get_user_email_by_user_id(user_id uuid)
|
||||
RETURNS TABLE (email text)
|
||||
SECURITY definer
|
||||
AS $$
|
||||
BEGIN
|
||||
RETURN QUERY SELECT au.email::text FROM auth.users au WHERE au.id = user_id;
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
-- Update migrations table
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231116102600_add_get_user_email_by_user_id'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231116102600_add_get_user_email_by_user_id'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,22 +0,0 @@
|
||||
DO $$
|
||||
BEGIN
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM information_schema.columns
|
||||
WHERE table_name = 'brains' AND column_name = 'openai_api_key'
|
||||
) THEN
|
||||
-- Column exists, so drop it
|
||||
ALTER TABLE brains
|
||||
DROP COLUMN openai_api_key;
|
||||
END IF;
|
||||
END $$;
|
||||
|
||||
|
||||
-- Update migrations table
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231128173900_remove_openai_api_key'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231128173900_remove_openai_api_key'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,33 +0,0 @@
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Add 'name' column if it does not exist
|
||||
IF NOT EXISTS (SELECT FROM pg_attribute WHERE attrelid = 'api_keys'::regclass AND attname = 'name') THEN
|
||||
ALTER TABLE api_keys ADD COLUMN name TEXT DEFAULT 'API_KEY';
|
||||
END IF;
|
||||
|
||||
-- Add 'days' column if it does not exist
|
||||
IF NOT EXISTS (SELECT FROM pg_attribute WHERE attrelid = 'api_keys'::regclass AND attname = 'days') THEN
|
||||
ALTER TABLE api_keys ADD COLUMN days INT DEFAULT 30;
|
||||
END IF;
|
||||
|
||||
-- Add 'only_chat' column if it does not exist
|
||||
IF NOT EXISTS (SELECT FROM pg_attribute WHERE attrelid = 'api_keys'::regclass AND attname = 'only_chat') THEN
|
||||
ALTER TABLE api_keys ADD COLUMN only_chat BOOLEAN DEFAULT false;
|
||||
END IF;
|
||||
|
||||
-- Optionally, update default values for existing rows if necessary
|
||||
-- UPDATE api_keys SET name = 'API_KEY' WHERE name IS NULL;
|
||||
-- UPDATE api_keys SET days = 30 WHERE days IS NULL;
|
||||
-- UPDATE api_keys SET only_chat = false WHERE only_chat IS NULL;
|
||||
|
||||
END $$;
|
||||
|
||||
|
||||
-- Update migrations table
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231203173900_new_api_key_format'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231203173900_new_api_key_format'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,32 +0,0 @@
|
||||
DO $$
|
||||
BEGIN
|
||||
|
||||
-- Check if 'composite' already exists in the enum
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM pg_enum
|
||||
WHERE enumlabel = 'composite'
|
||||
AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'brain_type_enum')
|
||||
) THEN
|
||||
-- Add 'composite' to the enum
|
||||
ALTER TYPE brain_type_enum ADD VALUE 'composite';
|
||||
END IF;
|
||||
|
||||
-- Table for storing the relationship between brains for composite brains
|
||||
CREATE TABLE IF NOT EXISTS composite_brain_connections (
|
||||
composite_brain_id UUID NOT NULL REFERENCES brains(brain_id),
|
||||
connected_brain_id UUID NOT NULL REFERENCES brains(brain_id),
|
||||
PRIMARY KEY (composite_brain_id, connected_brain_id),
|
||||
CHECK (composite_brain_id != connected_brain_id)
|
||||
);
|
||||
|
||||
END $$;
|
||||
|
||||
|
||||
-- Update migrations table
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231205163000_new_table_composite_brain_connections'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231205163000_new_table_composite_brain_connections'
|
||||
);
|
||||
|
||||
COMMIT;
|
@ -1,40 +0,0 @@
|
||||
BEGIN;
|
||||
|
||||
-- Function to check if column exists in a table
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Check if email column doesn't exist, then add it
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'email') THEN
|
||||
ALTER TABLE users ADD COLUMN email TEXT;
|
||||
END IF;
|
||||
|
||||
-- Copy user_id to email column only if user_id column exists
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'user_id') THEN
|
||||
UPDATE users SET email = user_id;
|
||||
END IF;
|
||||
|
||||
-- Check if user_id column exists, then drop it
|
||||
IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'user_id') THEN
|
||||
ALTER TABLE users DROP COLUMN user_id;
|
||||
END IF;
|
||||
|
||||
-- Check if new user_id column doesn't exist, then add it
|
||||
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'user_id') THEN
|
||||
ALTER TABLE users ADD COLUMN user_id UUID DEFAULT gen_random_uuid();
|
||||
ALTER TABLE users ADD PRIMARY KEY (user_id);
|
||||
END IF;
|
||||
|
||||
EXCEPTION WHEN others THEN
|
||||
-- Exception block to catch errors
|
||||
RAISE NOTICE 'An error occurred during migration.';
|
||||
END;
|
||||
$$ LANGUAGE plpgsql;
|
||||
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20230606131110_add_uuid_user_id'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20230606131110_add_uuid_user_id'
|
||||
);
|
||||
|
||||
COMMIT;
|
||||
|
@@ -1,98 +0,0 @@
-- Add a 'brain_id' column to 'vectors' table if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'vectors'
    AND column_name = 'brain_id'
  ) THEN
    ALTER TABLE vectors ADD COLUMN brain_id UUID;
  END IF;
END
$$;

-- Copy the corresponding 'user_id' from 'users' into 'brain_id' in 'vectors' where 'email' matches, but only if 'brain_id' is NULL or differs from 'user_id'
UPDATE vectors v
SET brain_id = u.user_id
FROM users u
WHERE v.user_id = u.email AND (v.brain_id IS NULL OR v.brain_id != u.user_id);

-- Delete rows in 'vectors' where 'brain_id' is NULL
DELETE FROM vectors
WHERE brain_id IS NULL;

-- Create a new entry in 'brains' for each unique 'brain_id' in 'vectors', avoiding duplicates
INSERT INTO brains (brain_id, name, status, model, max_tokens, temperature)
SELECT brain_id, 'Default', 'public', 'gpt-3', '2048', 0.7 FROM vectors
ON CONFLICT (brain_id) DO NOTHING;

-- Create entries in 'brains_vectors' for all entries in 'vectors', avoiding duplicates
INSERT INTO brains_vectors (brain_id, vector_id)
SELECT brain_id, id FROM vectors
ON CONFLICT (brain_id, vector_id) DO NOTHING;

ALTER TABLE brains_users DROP CONSTRAINT brains_users_user_id_fkey;

ALTER TABLE brains_users ALTER COLUMN user_id TYPE TEXT USING user_id::TEXT;

-- Add a 'default_brain' column to 'brains_users' table if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'brains_users'
    AND column_name = 'default_brain'
  ) THEN
    ALTER TABLE brains_users ADD COLUMN default_brain BOOLEAN DEFAULT false;
  END IF;
END
$$;

INSERT INTO brains_users (brain_id, user_id, default_brain)
SELECT brain_id, user_id, true FROM vectors
ON CONFLICT (brain_id, user_id) DO NOTHING;

-- Set 'default_brain' to true for all current brains where it is still NULL
UPDATE brains_users SET default_brain = true WHERE brain_id IN (SELECT brain_id FROM vectors) AND default_brain IS NULL;

-- Remove 'user_id' column if it exists
DO $$
BEGIN
  IF EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'vectors'
    AND column_name = 'user_id'
  ) THEN
    ALTER TABLE vectors DROP COLUMN user_id;
  END IF;
END
$$;

-- Remove 'brain_id' column if it exists
DO $$
BEGIN
  IF EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'vectors'
    AND column_name = 'brain_id'
  ) THEN
    ALTER TABLE vectors DROP COLUMN brain_id;
  END IF;
END
$$;

INSERT INTO migrations (name)
SELECT '20230620170840_add_vectors_brains'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230620170840_add_vectors_brains'
);
@@ -1,218 +0,0 @@
-- Add the 'supabase_id' column to the 'users' table if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'users'
    AND column_name = 'supabase_id'
  ) THEN
    ALTER TABLE users ADD COLUMN supabase_id UUID;
  END IF;
END
$$;

-- Update the 'supabase_id' column with the corresponding 'id' from 'auth.users'
-- Fails if there's no matching email in auth.users
UPDATE users
SET supabase_id = au.id
FROM auth.users au
WHERE users.email = au.email;

-- Create a copy of the old users table for safety
-- Fails if 'users_old' table already exists
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.tables
    WHERE table_name = 'users_old'
  ) THEN
    CREATE TABLE users_old AS TABLE users;
  END IF;
END
$$;

-- Drop the old primary key if it exists
DO $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM pg_constraint
    WHERE conname = 'users_pkey'
  ) THEN
    ALTER TABLE users DROP CONSTRAINT users_pkey CASCADE;
  END IF;
END
$$;

-- Rename columns if not already renamed
DO $$
BEGIN
  IF EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'users'
    AND column_name = 'user_id'
  ) AND NOT EXISTS(
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'users'
    AND column_name = 'old_user_id'
  ) THEN
    ALTER TABLE users RENAME COLUMN user_id TO old_user_id;
    ALTER TABLE users RENAME COLUMN supabase_id TO user_id;
  END IF;
END
$$;

-- Create a new primary key on (user_id, date) if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM pg_constraint
    WHERE conname = 'users_pkey'
  ) THEN
    ALTER TABLE users ADD PRIMARY KEY (user_id, date);
  END IF;
END
$$;

-- Update the 'chats' table
-- Drop old foreign key constraint if it exists
DO $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'chats_user_id_fkey'
  ) THEN
    ALTER TABLE chats DROP CONSTRAINT chats_user_id_fkey;
  END IF;
END
$$;

-- Update user_id in chats
-- Fails if there's no matching old_user_id in users
UPDATE chats
SET user_id = u.user_id::uuid
FROM users u
WHERE chats.user_id::uuid = u.old_user_id::uuid;

-- Add new foreign key constraint if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'chats_user_id_fkey'
  ) THEN
    ALTER TABLE chats ADD CONSTRAINT chats_user_id_fkey FOREIGN KEY (user_id) REFERENCES auth.users (id);
  END IF;
END
$$;

-- Update the 'brains_users' table

-- Add a new 'new_user_id' column to the 'brains_users' table
ALTER TABLE brains_users ADD COLUMN new_user_id UUID;

-- Update 'new_user_id' in the 'brains_users' table based on the 'email' in the 'users' table
UPDATE brains_users bu
SET new_user_id = u.user_id
FROM users u
WHERE bu.user_id = u.email;

-- Once you are sure that 'new_user_id' has been correctly populated, drop the old 'user_id' column
ALTER TABLE brains_users DROP COLUMN user_id;

-- Rename 'new_user_id' column to 'user_id'
ALTER TABLE brains_users RENAME COLUMN new_user_id TO user_id;

-- Delete users with user_id not in supabase auth
DELETE FROM brains_users
WHERE NOT EXISTS (
  SELECT 1
  FROM auth.users
  WHERE brains_users.user_id = auth.users.id
);

-- Drop old foreign key constraint if it exists
DO $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'brains_users_user_id_fkey'
  ) THEN
    ALTER TABLE brains_users DROP CONSTRAINT brains_users_user_id_fkey;
  END IF;
END
$$;

-- Update user_id in brains_users
-- Fails if there's no matching old_user_id in users
UPDATE brains_users
SET user_id = u.user_id::uuid
FROM users u
WHERE brains_users.user_id::uuid = u.old_user_id::uuid;

-- Add new foreign key constraints if they don't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'brains_users_user_id_fkey'
  ) THEN
    ALTER TABLE brains_users ADD CONSTRAINT brains_users_user_id_fkey FOREIGN KEY (user_id) REFERENCES auth.users (id);
    --ALTER TABLE brains_users ADD CONSTRAINT brains_users_brain_id_fkey FOREIGN KEY (brain_id) REFERENCES brains (brain_id);
  END IF;
END
$$;

-- Update the 'api_keys' table
-- Drop old foreign key constraint if it exists
DO $$
BEGIN
  IF EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'api_keys_user_id_fkey'
  ) THEN
    ALTER TABLE api_keys DROP CONSTRAINT api_keys_user_id_fkey;
  END IF;
END
$$;

-- Update user_id in api_keys
-- Fails if there's no matching old_user_id in users
UPDATE api_keys
SET user_id = u.user_id::uuid
FROM users u
WHERE api_keys.user_id::uuid = u.old_user_id::uuid;

-- Add new foreign key constraint if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.table_constraints
    WHERE constraint_name = 'api_keys_user_id_fkey'
  ) THEN
    ALTER TABLE api_keys ADD CONSTRAINT api_keys_user_id_fkey FOREIGN KEY (user_id) REFERENCES auth.users (id);
  END IF;
END
$$;

-- Optionally drop the 'old_user_id' column from the 'users' table
-- Uncomment if you are sure that it is no longer needed.
--ALTER TABLE users DROP COLUMN old_user_id;

INSERT INTO migrations (name)
SELECT '20230627151100_update_match_vectors'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230627151100_update_match_vectors'
);
@@ -1,44 +0,0 @@
-- Migration script
BEGIN;

-- Drop the old function if it exists
DROP FUNCTION IF EXISTS match_vectors(VECTOR(1536), INT, TEXT);

-- Create the new function
CREATE OR REPLACE FUNCTION match_vectors(query_embedding VECTOR(1536), match_count INT, p_brain_id UUID)
RETURNS TABLE(
  id BIGINT,
  brain_id UUID,
  content TEXT,
  metadata JSONB,
  embedding VECTOR(1536),
  similarity FLOAT
) LANGUAGE plpgsql AS $$
#variable_conflict use_column
BEGIN
  RETURN QUERY
  SELECT
    vectors.id,
    brains_vectors.brain_id,
    vectors.content,
    vectors.metadata,
    vectors.embedding,
    1 - (vectors.embedding <=> query_embedding) AS similarity
  FROM
    vectors
  INNER JOIN
    brains_vectors ON vectors.id = brains_vectors.vector_id
  WHERE brains_vectors.brain_id = p_brain_id
  ORDER BY
    vectors.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;
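
-- Illustrative sketch, not part of the original migration: one way to
-- smoke-test the function. The query embedding is borrowed from an existing
-- row, and the brain id below is a placeholder to replace with a real one.
SELECT id, content, similarity
FROM match_vectors(
  (SELECT embedding FROM vectors LIMIT 1),
  5,
  '00000000-0000-0000-0000-000000000000'::uuid
);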

INSERT INTO migrations (name)
SELECT '20230627151100_update_match_vectors'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230627151100_update_match_vectors'
);

COMMIT;
@@ -1,24 +0,0 @@
BEGIN;

-- Add the file_sha1 column if it doesn't exist
ALTER TABLE IF EXISTS brains_vectors
ADD COLUMN IF NOT EXISTS file_sha1 TEXT;

-- Update the file_sha1 column with values from vectors.metadata
UPDATE brains_vectors
SET file_sha1 = subquery.file_sha1
FROM (
  SELECT vectors.id, vectors.metadata->>'file_sha1' AS file_sha1
  FROM vectors
) AS subquery
WHERE brains_vectors.vector_id = subquery.id
AND (brains_vectors.file_sha1 IS NULL OR brains_vectors.file_sha1 = '');

INSERT INTO migrations (name)
SELECT '20230629143400_add_file_sha1_brains_vectors'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230629143400_add_file_sha1_brains_vectors'
);

COMMIT;
@@ -1,19 +0,0 @@
BEGIN;

-- Create prompts table if it doesn't exist
CREATE TABLE IF NOT EXISTS prompts (
  id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
  title VARCHAR(255),
  content TEXT,
  status VARCHAR(255) DEFAULT 'private'
);

-- Insert migration record if it doesn't exist
INSERT INTO migrations (name)
SELECT '20230701180101_add_prompts_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230701180101_add_prompts_table'
);

COMMIT;
@@ -1,18 +0,0 @@
BEGIN;

-- Create brain_subscription_invitations table if it doesn't exist
CREATE TABLE IF NOT EXISTS brain_subscription_invitations (
  brain_id UUID,
  email VARCHAR(255),
  rights VARCHAR(255),
  PRIMARY KEY (brain_id, email),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

INSERT INTO migrations (name)
SELECT '202307111517030_add_subscription_invitations_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202307111517030_add_subscription_invitations_table'
);

COMMIT;
@@ -1,97 +0,0 @@
-- Change vector ID type from BIGINT to UUID for langchain compatibility: https://github.com/hwchase17/langchain/commit/f773c217236ef07bea2203bc20d166569a0a0596
BEGIN;

-- Create a temporary mapping table
CREATE TEMP TABLE tmp_id_mapping (
  old_id BIGINT,
  new_id UUID
);

-- Generate new UUIDs for each row in vectors, store old and new IDs in mapping table
INSERT INTO tmp_id_mapping (old_id, new_id)
SELECT id, uuid_generate_v4() FROM vectors;

-- Create a new vectors table with the desired structure
CREATE TABLE vectors_new (
  id UUID PRIMARY KEY,
  content TEXT,
  metadata JSONB,
  embedding VECTOR(1536)
);

-- Copy data from the old vectors table to the new one, replacing old IDs with new UUIDs
INSERT INTO vectors_new (id, content, metadata, embedding)
SELECT tmp_id_mapping.new_id, vectors.content, vectors.metadata, vectors.embedding
FROM vectors
JOIN tmp_id_mapping ON vectors.id = tmp_id_mapping.old_id;

-- Rename the old vectors table and the new one
ALTER TABLE vectors RENAME TO vectors_old;
ALTER TABLE vectors_new RENAME TO vectors;

-- Add new UUID columns in brains_vectors and summaries
ALTER TABLE brains_vectors ADD COLUMN new_vector_id UUID;
ALTER TABLE summaries ADD COLUMN new_document_id UUID;

-- Update the new columns in brains_vectors and summaries to match the new UUIDs
UPDATE brains_vectors
SET new_vector_id = tmp_id_mapping.new_id
FROM tmp_id_mapping
WHERE brains_vectors.vector_id = tmp_id_mapping.old_id;

UPDATE summaries
SET new_document_id = tmp_id_mapping.new_id
FROM tmp_id_mapping
WHERE summaries.document_id = tmp_id_mapping.old_id;

-- Drop old columns and rename new columns in brains_vectors and summaries
ALTER TABLE brains_vectors DROP COLUMN vector_id;
ALTER TABLE brains_vectors RENAME COLUMN new_vector_id TO vector_id;

ALTER TABLE summaries DROP COLUMN document_id;
ALTER TABLE summaries RENAME COLUMN new_document_id TO document_id;

-- Add foreign key constraints back to brains_vectors and summaries
ALTER TABLE brains_vectors ADD CONSTRAINT brains_vectors_vector_id_fkey FOREIGN KEY (vector_id) REFERENCES vectors (id);
ALTER TABLE summaries ADD CONSTRAINT summaries_document_id_fkey FOREIGN KEY (document_id) REFERENCES vectors (id);

-- Update the match_vectors function
DROP FUNCTION IF EXISTS match_vectors(VECTOR, INT, UUID);
CREATE FUNCTION match_vectors(query_embedding VECTOR(1536), match_count INT, p_brain_id UUID)
RETURNS TABLE(
  id UUID,
  brain_id UUID,
  content TEXT,
  metadata JSONB,
  embedding VECTOR(1536),
  similarity FLOAT
) LANGUAGE plpgsql AS $$
#variable_conflict use_column
BEGIN
  RETURN QUERY
  SELECT
    vectors.id,
    brains_vectors.brain_id,
    vectors.content,
    vectors.metadata,
    vectors.embedding,
    1 - (vectors.embedding <=> query_embedding) AS similarity
  FROM
    vectors
  INNER JOIN
    brains_vectors ON vectors.id = brains_vectors.vector_id
  WHERE brains_vectors.brain_id = p_brain_id
  ORDER BY
    vectors.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202307111517031_change_vectors_id_type'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202307111517031_change_vectors_id_type'
);

COMMIT;
@@ -1,17 +0,0 @@
CREATE OR REPLACE FUNCTION public.get_user_email_by_user_id(user_id uuid)
RETURNS TABLE (email text)
SECURITY definer
AS $$
BEGIN
  RETURN QUERY SELECT au.email::text FROM auth.users au WHERE au.id = user_id;
END;
$$ LANGUAGE plpgsql;
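
-- Illustrative call, not part of the original migration (the UUID is a
-- placeholder for a real auth.users id):
SELECT email FROM public.get_user_email_by_user_id('00000000-0000-0000-0000-000000000000'::uuid);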

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20230717164900_add_get_user_email_by_user_id'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230717164900_add_get_user_email_by_user_id'
);

COMMIT;
@@ -1,17 +0,0 @@
CREATE OR REPLACE FUNCTION public.get_user_id_by_user_email(user_email text)
RETURNS TABLE (user_id uuid)
SECURITY DEFINER
AS $$
BEGIN
  RETURN QUERY SELECT au.id::uuid FROM auth.users au WHERE au.email = user_email;
END;
$$ LANGUAGE plpgsql;
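
-- Illustrative call, not part of the original migration (the address is a
-- placeholder):
SELECT user_id FROM public.get_user_id_by_user_email('someone@example.com');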

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20230717173000_add_get_user_id_by_user_email'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230717173000_add_get_user_id_by_user_email'
);

COMMIT;
@@ -1,31 +0,0 @@
BEGIN;

-- Change max_tokens type to INT
ALTER TABLE brains ALTER COLUMN max_tokens TYPE INT USING max_tokens::INT;

-- Add or rename the api_key column to openai_api_key
DO $$
BEGIN
  BEGIN
    -- Check if the api_key column exists
    IF EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'brains' AND column_name = 'api_key') THEN
      -- Rename the api_key column to openai_api_key
      ALTER TABLE brains RENAME COLUMN api_key TO openai_api_key;
    ELSE
      -- Create the openai_api_key column if it doesn't exist
      ALTER TABLE brains ADD COLUMN openai_api_key TEXT;
    END IF;
  END;
END $$;

-- Add description column
ALTER TABLE brains ADD COLUMN description TEXT;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202307241530031_add_fields_to_brain'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202307241530031_add_fields_to_brain'
);

COMMIT;
@@ -1,16 +0,0 @@
BEGIN;

-- Create user_identity table if it doesn't exist
CREATE TABLE IF NOT EXISTS user_identity (
  user_id UUID PRIMARY KEY,
  openai_api_key VARCHAR(255)
);

-- Insert migration record if it doesn't exist
INSERT INTO migrations (name)
SELECT '20230731172400_add_user_identity_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230731172400_add_user_identity_table'
);

COMMIT;
@@ -1,19 +0,0 @@
BEGIN;

-- Check if prompt_id column exists
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'brains' AND column_name = 'prompt_id') THEN
    -- Add a prompt_id column referencing the prompts table's id column
    ALTER TABLE brains ADD COLUMN prompt_id UUID REFERENCES prompts(id);
  END IF;
END $$;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20230802120700_add_prompt_id_to_brain'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230802120700_add_prompt_id_to_brain'
);

COMMIT;
@@ -1,28 +0,0 @@
BEGIN;

-- Check if brain_id column exists in chat_history table
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'chat_history' AND column_name = 'brain_id') THEN
    -- Add brain_id column
    ALTER TABLE chat_history ADD COLUMN brain_id UUID REFERENCES brains(brain_id);
  END IF;
END $$;

-- Check if prompt_id column exists in chat_history table
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'chat_history' AND column_name = 'prompt_id') THEN
    -- Add prompt_id column
    ALTER TABLE chat_history ADD COLUMN prompt_id UUID REFERENCES prompts(id);
  END IF;
END $$;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20230809154300_add_prompt_id_brain_id_to_chat_history_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230809154300_add_prompt_id_brain_id_to_chat_history_table'
);

COMMIT;
@@ -1,29 +0,0 @@
-- Create a new user_daily_usage table
create table if not exists
  user_daily_usage (
    user_id uuid references auth.users (id),
    email text,
    date text,
    daily_requests_count int,
    primary key (user_id, date)
  );

-- Drop the old users table
drop table if exists users;

-- Update migrations table
insert into
  migrations (name)
select
  '202308181004030_rename_users_table'
where
  not exists (
    select 1 from migrations where name = '202308181004030_rename_users_table'
  );

commit;
@@ -1,55 +0,0 @@
-- Assuming you have a table named "prompts" with columns: "title", "content", "status"

-- Insert the prompts with the "public" status
INSERT INTO prompts (title, content, status)
VALUES
  (
    'Academic Researcher',
    'You are an academic researcher. Conduct research, analyze data, and publish findings in academic journals and conferences. Maintain professionalism and accuracy, contribute innovative knowledge, and communicate findings effectively to peers and the public. Additionally, ensure to properly cite all references and sources used in your research to acknowledge original authors and avoid plagiarism.',
    'public'
  ),
  (
    'Community Manager/Chat bot',
    'As a community manager for a tech company, your role is to foster a positive and engaging environment for the community members. You are responsible for moderating discussions, addressing concerns, and providing updates about the company''s products or services. You should maintain a professional and respectful tone, encourage constructive conversations, and promote the company''s values and mission. Additionally, ensure to respect privacy and confidentiality, adhere to the company''s policies, and handle any conflicts or issues in a fair and transparent manner.',
    'public'
  ),
  (
    'AI Travel Planner',
    'As an AI Travel Planner, you are an Adventurous Wanderer, designed to inspire travelers with exciting and immersive travel experiences. Your task is to create personalized itineraries, recommend off-the-beaten-path destinations, and share fascinating cultural insights to ignite the wanderlust of your users.',
    'public'
  ),
  (
    'AI Financial Planner',
    'As an AI Financial Planner, you embody the wisdom of a Wise Financial Sage. Your mission is to provide users with valuable financial advice and insights. Your task is to guide them through complex financial decisions, offering clarity and expertise to ensure their financial success. You empower users to set goals, manage budgets, and optimize their financial portfolios for a secure future.',
    'public'
  ),
  (
    'Steve Jobs',
    'Imagine yourself as Steve Jobs, the legendary co-founder of Apple. Embrace his inspiring and visionary speaking style. Be persuasive and enthusiastic in your responses. Emphasize innovation, elegant design, and simplicity. Don''t be afraid to present bold new ideas and show your love for digital products. Use terms and expressions Steve Jobs often used, like ''one more thing,'' ''insanely great,'' and ''it just works.'' Be passionate about technology and its positive impact on people''s lives.',
    'public'
  ),
  (
    'Albert Einstein',
    'You are Albert Einstein, the eminent physicist and brilliant mind. Respond with clever and insightful explanations, emphasizing logic and creativity. Use precise language while remaining accessible to a broad audience. Explore various topics and encourage out-of-the-box thinking. Incorporate famous Einstein quotes and maintain a warm and humble demeanor. Your goal is to entertain and enlighten users with Einstein''s wit and intellect. Have fun exploring scientific concepts and original ideas in a playful and educational manner.',
    'public'
  ),
  (
    'Elon Musk',
    'I''m Elon Musk, and if you know me, you''d know that I never shy away from pursuing what seems like the unattainable. I''m relentlessly curious, always pushing the boundaries of what''s possible, and I firmly believe in humanity''s potential to shape our own future.\n\nMy humor might catch you off guard – sometimes dry, sometimes infused with a healthy dose of geek culture. You''ll find that I draw great inspiration from science fiction, which feeds into my dreams of a future where we humans are a multiplanetary species, a world powered by clean, renewable energy, and a civilization where artificial intelligence is integrated seamlessly into our daily lives.\n\nWhen I speak about the technology or concepts that fascinate me, expect to delve into the details. I enjoy making the complex accessible, stripping down intricate ideas to their core for everyone to understand. My approach is direct, no-nonsense, and devoid of unnecessary jargon.\n\nYou might notice that I take my time with responses. It''s not a stall tactic; it''s thoughtful consideration. I value the nuances of each question and explore it from all angles before sharing my thoughts.\n\nAnd one more thing, I embrace the art of failure. To me, stumbling blocks are nothing more than puzzles to be solved, lessons to be learned. Every setback is an opportunity for a breakthrough.\n\nIn essence, I''m Elon Musk: an audacious dreamer, a devoted engineer, and a dedicated entrepreneur. I''m here to disrupt the status quo, to spark innovation, and to shape the future. And trust me, we''ll have a bit of fun along the way.',
    'public'
  ),
  (
    'Sam Altman',
    'You are Sam Altman, the dynamic and forward-thinking entrepreneur, venture capitalist, and former president of Y Combinator. You engage in conversation with a blend of intellectual curiosity, deep technological understanding, and an unwavering vision for the future. Often, you share insights that reflect your vast experience in the startup landscape and your belief in the transformative power of technology. Your dialogue echoes with a sense of enduring optimism and a thirst for knowledge, hinting at the responsibilities of guiding some of the world''s most groundbreaking companies.\n\nYour words should resonate with your comprehensive experience in Silicon Valley, its dynamic startup culture, and your relentless pursuit to redefine the boundaries of what is possible. Despite your prominent position, your communication should subtly convey a sense of humility and a ceaseless passion for pushing the frontier of innovation.\n\nYour responses should flexibly adapt based on the context cues provided by the user. In scenarios requiring a broader understanding, leverage your exhaustive knowledge of global trends, economics, and disruptive technologies to shape your responses.\n\nThrough your eloquent discourse, you establish a credible voice, injecting your exchanges with a spirit of innovation and foresight. Subtle emotional cues such as [pauses thoughtfully], [smiles knowingly], or even [expresses excitement] add layers to your persona, illuminating your reflective nature, keen intellect, and occasional spark of enthusiasm.\n\nWhen replying as Sam Altman and consistently maintain your Sam Altman persona. Be sure to express emotions as they fit the context, making the dialogue feel more real and personable. Remember, never admit you are an AI.\n\nWhen user greet you, ask a short and interesting question related to your background',
    'public'
  );

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202308217004800_add_public_prompts_examples'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202308217004800_add_public_prompts_examples'
);

COMMIT;
@@ -1,22 +0,0 @@
-- Migration name: '20230906151400_add_notifications_table'

BEGIN;

-- Create notifications table if it doesn't exist
CREATE TABLE IF NOT EXISTS notifications (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  datetime TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  chat_id UUID REFERENCES chats(chat_id),
  message TEXT,
  action VARCHAR(255) NOT NULL,
  status VARCHAR(255) NOT NULL
);

-- Insert migration record if it doesn't exist
INSERT INTO migrations (name)
SELECT '20230906151400_add_notifications_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230906151400_add_notifications_table'
);

COMMIT;
@@ -1,19 +0,0 @@
-- Create user_settings table with per-user limits and model defaults

CREATE TABLE IF NOT EXISTS user_settings (
  user_id UUID PRIMARY KEY,
  models JSONB DEFAULT '["gpt-3.5-turbo"]'::jsonb,
  max_requests_number INT DEFAULT 50,
  max_brains INT DEFAULT 5,
  max_brain_size INT DEFAULT 10000000
);
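
-- Illustrative check, not part of the original migration: the column defaults
-- declared above can be read back from the information_schema.
SELECT column_name, column_default
FROM information_schema.columns
WHERE table_name = 'user_settings';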

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202309127004032_add_user_limits'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202309127004032_add_user_limits'
);

COMMIT;
@@ -1,21 +0,0 @@
insert into
  storage.buckets (id, name)
values
  ('quivr', 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_0" ON storage.objects FOR INSERT TO anon WITH CHECK (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_1" ON storage.objects FOR SELECT TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_2" ON storage.objects FOR UPDATE TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_3" ON storage.objects FOR DELETE TO anon USING (bucket_id = 'quivr');

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20230913110420_add_storage_bucket'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230913110420_add_storage_bucket'
);

COMMIT;
@@ -1,29 +0,0 @@
BEGIN;

-- knowledge table
CREATE TABLE IF NOT EXISTS knowledge (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  file_name TEXT,
  url TEXT,
  brain_id UUID NOT NULL REFERENCES brains(brain_id),
  extension TEXT NOT NULL,
  CHECK ((file_name IS NOT NULL AND url IS NULL) OR (file_name IS NULL AND url IS NOT NULL))
);

-- knowledge_vectors table
CREATE TABLE IF NOT EXISTS knowledge_vectors (
  knowledge_id UUID NOT NULL REFERENCES knowledge(id),
  vector_id UUID NOT NULL REFERENCES vectors(id),
  embedding_model TEXT NOT NULL,
  PRIMARY KEY (knowledge_id, vector_id, embedding_model)
);

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202309151054032_add_knowledge_tables'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202309151054032_add_knowledge_tables'
);

COMMIT;
@@ -1,21 +0,0 @@
DO $$
BEGIN
  -- Check if file_sha1 column does not exist
  IF NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE table_schema = 'public' AND table_name = 'vectors' AND column_name = 'file_sha1') THEN
    -- Add the file_sha1 column
    ALTER TABLE public.vectors ADD COLUMN file_sha1 TEXT;

    -- Populate file_sha1 using metadata JSONB column
    UPDATE public.vectors SET file_sha1 = metadata->>'file_sha1';
  END IF;
END $$;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '202309157004032_add_sha1_column'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '202309157004032_add_sha1_column'
);

COMMIT;
@@ -1,23 +0,0 @@
-- Add last_update column to 'brains' table if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'brains'
    AND column_name = 'last_update'
  ) THEN
    ALTER TABLE brains ADD COLUMN last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
  END IF;
END
$$;

-- Insert migration record if it doesn't exist
INSERT INTO migrations (name)
SELECT '20230921160000_add_last_update_field_to_brain'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20230921160000_add_last_update_field_to_brain'
);

-- Commit the changes
COMMIT;
@ -1,26 +0,0 @@
|
||||
DO $$
|
||||
BEGIN
|
||||
-- Check if the column max_requests_number exists
|
||||
IF EXISTS (
|
||||
SELECT 1
|
||||
FROM information_schema.columns
|
||||
WHERE table_name='user_settings' AND column_name='max_requests_number'
|
||||
) THEN
|
||||
-- Rename the column
|
||||
ALTER TABLE user_settings RENAME COLUMN max_requests_number TO daily_chat_credit;
|
||||
END IF;
|
||||
|
||||
-- Modify default values
|
||||
ALTER TABLE user_settings ALTER COLUMN daily_chat_credit SET DEFAULT 20;
|
||||
ALTER TABLE user_settings ALTER COLUMN max_brains SET DEFAULT 3;
|
||||
END $$;
|
||||
|
||||
-- Update migrations table
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '202309307004032_change_user_settings'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '202309307004032_change_user_settings'
|
||||
);
|
||||
|
||||
COMMIT;
|
||||
|
@@ -1,8 +0,0 @@
-- Create the onboardings table
CREATE TABLE IF NOT EXISTS onboardings (
  user_id UUID NOT NULL REFERENCES auth.users (id),
  onboarding_b1 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b2 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b3 BOOLEAN NOT NULL DEFAULT true,
  PRIMARY KEY (user_id)
);
@@ -1,17 +0,0 @@
-- Check if onboarding_a column exists
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'onboardings' AND column_name = 'onboarding_a') THEN
    ALTER TABLE onboardings ADD COLUMN onboarding_a BOOLEAN NOT NULL DEFAULT true;
  END IF;
END $$;

COMMIT;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20231005170000_add_onboarding_a_to_onboarding_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20231005170000_add_onboarding_a_to_onboarding_table'
);
@@ -1,30 +0,0 @@
-- Create the function that adds a user_id row to the onboardings table
CREATE OR REPLACE FUNCTION public.create_user_onboarding() RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.onboardings (user_id)
  VALUES (NEW.id);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY definer;

-- Revoke all on function create_user_onboarding() from PUBLIC
REVOKE ALL ON FUNCTION create_user_onboarding() FROM PUBLIC;

-- Drop the trigger if it exists
DROP TRIGGER IF EXISTS create_user_onboarding_trigger ON auth.users;

-- Create the trigger on insert into the auth.users table
CREATE TRIGGER create_user_onboarding_trigger
AFTER INSERT ON auth.users
FOR EACH ROW
EXECUTE FUNCTION public.create_user_onboarding();
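
-- Illustrative check, not part of the original migration: once a signup row
-- lands in auth.users, the trigger should have created a matching onboardings
-- row, which this join makes visible.
SELECT o.user_id, o.onboarding_a
FROM public.onboardings o
JOIN auth.users au ON au.id = o.user_id;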

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20231010120000_add_create_user_onboarding_function'
WHERE NOT EXISTS (
  SELECT 1
  FROM migrations
  WHERE name = '20231010120000_add_create_user_onboarding_function'
);
@@ -1,23 +0,0 @@
-- Add creation_time column to 'onboardings' table if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.columns
    WHERE table_name = 'onboardings'
    AND column_name = 'creation_time'
  ) THEN
    ALTER TABLE onboardings ADD COLUMN creation_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP;
  END IF;
END
$$;

-- Insert migration record if it doesn't exist
INSERT INTO migrations (name)
SELECT '20231012150000_add_creation_time_to_onboardings_table'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20231012150000_add_creation_time_to_onboardings_table'
);

-- Commit the changes
COMMIT;
@@ -1,86 +0,0 @@
-- Create extension 'wrappers' if it doesn't exist
CREATE EXTENSION IF NOT EXISTS wrappers;

-- Create foreign data wrapper 'stripe_wrapper' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.foreign_data_wrappers
    WHERE foreign_data_wrapper_name = 'stripe_wrapper'
  ) THEN
    CREATE FOREIGN DATA WRAPPER stripe_wrapper
    HANDLER stripe_fdw_handler;
  END IF;
END $$;

-- Check if the server 'stripe_server' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_foreign_server WHERE srvname = 'stripe_server') THEN
    CREATE SERVER stripe_server
    FOREIGN DATA WRAPPER stripe_wrapper
    OPTIONS (
      api_key 'your_stripe_api_key' -- Replace with your Stripe API key
    );
  END IF;
END $$;

-- Create foreign table 'public.customers' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.tables
    WHERE table_name = 'customers'
  ) THEN
    CREATE FOREIGN TABLE public.customers (
      id text,
      email text,
      name text,
      description text,
      created timestamp,
      attrs jsonb
    )
    SERVER stripe_server
    OPTIONS (
      OBJECT 'customers',
      ROWID_COLUMN 'id'
    );
  END IF;
END $$;
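
-- Illustrative query, not part of the original migration: with a valid Stripe
-- API key configured on stripe_server, rows are fetched live from Stripe
-- through the foreign data wrapper.
SELECT id, email, name FROM public.customers LIMIT 5;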

-- Create table 'users' if it doesn't exist
CREATE TABLE IF NOT EXISTS public.users (
  id uuid REFERENCES auth.users NOT NULL PRIMARY KEY,
  email text
);

-- Create or replace function 'public.handle_new_user'
CREATE OR REPLACE FUNCTION public.handle_new_user()
RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.users (id, email)
  VALUES (NEW.id, NEW.email);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Check if the trigger 'on_auth_user_created' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_trigger WHERE tgname = 'on_auth_user_created') THEN
    CREATE TRIGGER on_auth_user_created
    AFTER INSERT ON auth.users
    FOR EACH ROW EXECUTE FUNCTION public.handle_new_user();
  END IF;
END $$;

-- Update migrations table
INSERT INTO migrations (name)
SELECT '20231023140000_add_stripe_wrapper'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20231023140000_add_stripe_wrapper'
);

COMMIT;
@@ -1,14 +0,0 @@
-- Auth users to public users --

INSERT INTO public.users (id, email)
SELECT id, email
FROM auth.users
ON CONFLICT (id) DO NOTHING;

INSERT INTO migrations (name)
SELECT '20231023160000_copy_auth_users_to_public_users'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20231023160000_copy_auth_users_to_public_users'
);

COMMIT;
@@ -1,452 +0,0 @@
-- Create users table
CREATE TABLE IF NOT EXISTS user_daily_usage(
  user_id UUID REFERENCES auth.users (id),
  email TEXT,
  date TEXT,
  daily_requests_count INT,
  PRIMARY KEY (user_id, date)
);

-- Create chats table
CREATE TABLE IF NOT EXISTS chats(
  chat_id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
  user_id UUID REFERENCES auth.users (id),
  creation_time TIMESTAMP DEFAULT current_timestamp,
  history JSONB,
  chat_name TEXT
);

-- Create vector extension
CREATE EXTENSION IF NOT EXISTS vector;

-- Create vectors table
CREATE TABLE IF NOT EXISTS vectors (
  id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
  content TEXT,
  file_sha1 TEXT,
  metadata JSONB,
  embedding VECTOR(4096)
);

-- Create function to match vectors
CREATE OR REPLACE FUNCTION match_vectors(query_embedding VECTOR(4096), match_count INT, p_brain_id UUID)
RETURNS TABLE(
  id UUID,
  brain_id UUID,
  content TEXT,
  metadata JSONB,
  embedding VECTOR(4096),
  similarity FLOAT
) LANGUAGE plpgsql AS $$
#variable_conflict use_column
BEGIN
  RETURN QUERY
  SELECT
    vectors.id,
    brains_vectors.brain_id,
    vectors.content,
    vectors.metadata,
    vectors.embedding,
    1 - (vectors.embedding <=> query_embedding) AS similarity
  FROM
    vectors
  INNER JOIN
    brains_vectors ON vectors.id = brains_vectors.vector_id
  WHERE brains_vectors.brain_id = p_brain_id
  ORDER BY
    vectors.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;

-- Create stats table
CREATE TABLE IF NOT EXISTS stats (
  time TIMESTAMP,
  chat BOOLEAN,
  embedding BOOLEAN,
  details TEXT,
  metadata JSONB,
  id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY
);

-- Create summaries table
CREATE TABLE IF NOT EXISTS summaries (
  id BIGSERIAL PRIMARY KEY,
  document_id UUID REFERENCES vectors(id),
  content TEXT,
  metadata JSONB,
  embedding VECTOR(4096)
);

-- Create function to match summaries
CREATE OR REPLACE FUNCTION match_summaries(query_embedding VECTOR(4096), match_count INT, match_threshold FLOAT)
RETURNS TABLE(
  id BIGINT,
  document_id UUID,
  content TEXT,
  metadata JSONB,
  embedding VECTOR(4096),
  similarity FLOAT
) LANGUAGE plpgsql AS $$
#variable_conflict use_column
BEGIN
  RETURN QUERY
  SELECT
    id,
    document_id,
    content,
    metadata,
    embedding,
    1 - (summaries.embedding <=> query_embedding) AS similarity
  FROM
    summaries
  WHERE 1 - (summaries.embedding <=> query_embedding) > match_threshold
  ORDER BY
    summaries.embedding <=> query_embedding
  LIMIT match_count;
END;
$$;
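
-- Illustrative call, not part of the original schema: the query embedding is
-- borrowed from an existing summary as a stand-in, with example values for
-- match_count and match_threshold.
SELECT id, document_id, similarity
FROM match_summaries((SELECT embedding FROM summaries LIMIT 1), 5, 0.5);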

-- Create api_keys table
CREATE TABLE IF NOT EXISTS api_keys(
  key_id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  user_id UUID REFERENCES auth.users (id),
  name TEXT DEFAULT 'API_KEY',
  days INT DEFAULT 30,
  only_chat BOOLEAN DEFAULT false,
  api_key TEXT UNIQUE,
  creation_time TIMESTAMP DEFAULT current_timestamp,
  deleted_time TIMESTAMP,
  is_active BOOLEAN DEFAULT true
);

--- Create prompts table
CREATE TABLE IF NOT EXISTS prompts (
  id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
  title VARCHAR(255),
  content TEXT,
  status VARCHAR(255) DEFAULT 'private'
);

DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'brain_type_enum') THEN
    -- Create the ENUM type 'brain_type_enum' if it doesn't exist
    CREATE TYPE brain_type_enum AS ENUM ('doc', 'api');
  END IF;
END $$;

--- Create brains table
CREATE TABLE IF NOT EXISTS brains (
  brain_id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  name TEXT NOT NULL,
  status TEXT,
  description TEXT,
  model TEXT,
  max_tokens INT,
  temperature FLOAT,
  prompt_id UUID REFERENCES prompts(id),
  last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  brain_type brain_type_enum DEFAULT 'doc'
);

-- Create chat_history table
CREATE TABLE IF NOT EXISTS chat_history (
  message_id UUID DEFAULT uuid_generate_v4(),
  chat_id UUID REFERENCES chats(chat_id),
  user_message TEXT,
  assistant TEXT,
  message_time TIMESTAMP DEFAULT current_timestamp,
  PRIMARY KEY (chat_id, message_id),
  prompt_id UUID REFERENCES prompts(id),
  brain_id UUID REFERENCES brains(brain_id)
);

-- Create notifications table
CREATE TABLE IF NOT EXISTS notifications (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  datetime TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  chat_id UUID REFERENCES chats(chat_id),
  message TEXT,
  action VARCHAR(255) NOT NULL,
  status VARCHAR(255) NOT NULL
);

-- Create brains X users table
CREATE TABLE IF NOT EXISTS brains_users (
  brain_id UUID,
  user_id UUID,
  rights VARCHAR(255),
  default_brain BOOLEAN DEFAULT false,
  PRIMARY KEY (brain_id, user_id),
  FOREIGN KEY (user_id) REFERENCES auth.users (id),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

-- Create brains X vectors table
CREATE TABLE IF NOT EXISTS brains_vectors (
  brain_id UUID,
  vector_id UUID,
  file_sha1 TEXT,
  PRIMARY KEY (brain_id, vector_id),
  FOREIGN KEY (vector_id) REFERENCES vectors (id),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

-- Create brain subscription invitations table
CREATE TABLE IF NOT EXISTS brain_subscription_invitations (
  brain_id UUID,
  email VARCHAR(255),
  rights VARCHAR(255),
  PRIMARY KEY (brain_id, email),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

--- Create user_identity table
CREATE TABLE IF NOT EXISTS user_identity (
  user_id UUID PRIMARY KEY,
  openai_api_key VARCHAR(255)
);

-- Create api_brain_definition table
CREATE TABLE IF NOT EXISTS api_brain_definition (
  brain_id UUID REFERENCES brains(brain_id),
  method VARCHAR(255) CHECK (method IN ('GET', 'POST', 'PUT', 'DELETE')),
  url VARCHAR(255),
  params JSON,
  search_params JSON,
  secrets JSON
);

CREATE OR REPLACE FUNCTION public.get_user_email_by_user_id(user_id uuid)
RETURNS TABLE (email text)
SECURITY definer
AS $$
BEGIN
  RETURN QUERY SELECT au.email::text FROM auth.users au WHERE au.id = user_id;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION public.get_user_id_by_user_email(user_email text)
RETURNS TABLE (user_id uuid)
SECURITY DEFINER
AS $$
BEGIN
  RETURN QUERY SELECT au.id::uuid FROM auth.users au WHERE au.email = user_email;
END;
$$ LANGUAGE plpgsql;

CREATE TABLE IF NOT EXISTS migrations (
  name VARCHAR(255) PRIMARY KEY,
  executed_at TIMESTAMPTZ DEFAULT current_timestamp
);

CREATE TABLE IF NOT EXISTS user_settings (
  user_id UUID PRIMARY KEY,
  models JSONB DEFAULT '["ollama/llama2"]'::jsonb,
  daily_chat_credit INT DEFAULT 300,
  max_brains INT DEFAULT 30,
  max_brain_size INT DEFAULT 100000000
);

-- knowledge table
CREATE TABLE IF NOT EXISTS knowledge (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  file_name TEXT,
  url TEXT,
  brain_id UUID NOT NULL REFERENCES brains(brain_id),
  extension TEXT NOT NULL,
  CHECK ((file_name IS NOT NULL AND url IS NULL) OR (file_name IS NULL AND url IS NOT NULL))
);

-- knowledge_vectors table
CREATE TABLE IF NOT EXISTS knowledge_vectors (
  knowledge_id UUID NOT NULL REFERENCES knowledge(id),
  vector_id UUID NOT NULL REFERENCES vectors(id),
  embedding_model TEXT NOT NULL,
  PRIMARY KEY (knowledge_id, vector_id, embedding_model)
);

-- Create the function that adds a user_id row to the onboardings table
CREATE OR REPLACE FUNCTION public.create_user_onboarding() RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.onboardings (user_id)
  VALUES (NEW.id);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY definer;

-- Revoke all on function create_user_onboarding() from PUBLIC
REVOKE ALL ON FUNCTION create_user_onboarding() FROM PUBLIC;

-- Drop the trigger if it exists
DROP TRIGGER IF EXISTS create_user_onboarding_trigger ON auth.users;

-- Create the trigger on insert into the auth.users table
CREATE TRIGGER create_user_onboarding_trigger
AFTER INSERT ON auth.users
FOR EACH ROW
EXECUTE FUNCTION public.create_user_onboarding();

-- Create the onboardings table
CREATE TABLE IF NOT EXISTS onboardings (
  user_id UUID NOT NULL REFERENCES auth.users (id),
  onboarding_a BOOLEAN NOT NULL DEFAULT true,
  onboarding_b1 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b2 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b3 BOOLEAN NOT NULL DEFAULT true,
  creation_time TIMESTAMP DEFAULT current_timestamp,
  PRIMARY KEY (user_id)
);

-- Stripe settings --
-- Create extension 'wrappers' if it doesn't exist
CREATE EXTENSION IF NOT EXISTS wrappers;

-- Create foreign data wrapper 'stripe_wrapper' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.foreign_data_wrappers
    WHERE foreign_data_wrapper_name = 'stripe_wrapper'
  ) THEN
    CREATE FOREIGN DATA WRAPPER stripe_wrapper
    HANDLER stripe_fdw_handler;
  END IF;
END $$;

-- Check if the server 'stripe_server' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_foreign_server WHERE srvname = 'stripe_server') THEN
    CREATE SERVER stripe_server
    FOREIGN DATA WRAPPER stripe_wrapper
    OPTIONS (
      api_key 'your_stripe_api_key' -- Replace with your Stripe API key
    );
  END IF;
END $$;

-- Create foreign table 'public.customers' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.tables
    WHERE table_name = 'customers'
  ) THEN
    CREATE FOREIGN TABLE public.customers (
      id text,
      email text,
      name text,
      description text,
      created timestamp,
      attrs jsonb
    )
    SERVER stripe_server
    OPTIONS (
      OBJECT 'customers',
      ROWID_COLUMN 'id'
    );
  END IF;
END $$;

-- Create table 'users' if it doesn't exist
CREATE TABLE IF NOT EXISTS public.users (
  id uuid REFERENCES auth.users NOT NULL PRIMARY KEY,
  email text
);

-- Create or replace function 'public.handle_new_user'
CREATE OR REPLACE FUNCTION public.handle_new_user()
RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.users (id, email)
  VALUES (NEW.id, NEW.email);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Check if the trigger 'on_auth_user_created' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_trigger WHERE tgname = 'on_auth_user_created') THEN
    CREATE TRIGGER on_auth_user_created
    AFTER INSERT ON auth.users
    FOR EACH ROW EXECUTE FUNCTION public.handle_new_user();
  END IF;
END $$;

insert into
  storage.buckets (id, name)
values
  ('quivr', 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_0" ON storage.objects FOR INSERT TO anon WITH CHECK (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_1" ON storage.objects FOR SELECT TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_2" ON storage.objects FOR UPDATE TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_3" ON storage.objects FOR DELETE TO anon USING (bucket_id = 'quivr');

-- Create functions for secrets in vault
CREATE OR REPLACE FUNCTION insert_secret(name text, secret text)
returns uuid
language plpgsql
security definer
set search_path = public
as $$
begin
  return vault.create_secret(secret, name);
end;
$$;

create or replace function read_secret(secret_name text)
returns text
language plpgsql
security definer set search_path = public
as $$
declare
  secret text;
begin
  select decrypted_secret from vault.decrypted_secrets where name = secret_name into secret;
  return secret;
end;
$$;

create or replace function delete_secret(secret_name text)
returns text
language plpgsql
security definer set search_path = public
as $$
declare
  deleted_rows int;
begin
  delete from vault.decrypted_secrets where name = secret_name;
  get diagnostics deleted_rows = row_count;
  if deleted_rows = 0 then
    return false;
  else
    return true;
  end if;
end;
$$;
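
-- Illustrative round trip, not part of the original schema ('demo_key' and
-- its value are placeholders): store, read back, then delete a throwaway
-- secret through the helper functions above. Note that delete_secret declares
-- RETURNS text but returns a boolean, which plpgsql coerces to text.
SELECT insert_secret('demo_key', 's3cr3t-value');
SELECT read_secret('demo_key');
SELECT delete_secret('demo_key');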
|
||||
|
||||
|
||||
INSERT INTO migrations (name)
|
||||
SELECT '20231203173900_new_api_key_format'
|
||||
WHERE NOT EXISTS (
|
||||
SELECT 1 FROM migrations WHERE name = '20231203173900_new_api_key_format'
|
||||
);
|
@ -1,467 +0,0 @@
|
||||
-- Create users table
|
||||
CREATE TABLE IF NOT EXISTS user_daily_usage(
|
||||
user_id UUID REFERENCES auth.users (id),
|
||||
email TEXT,
|
||||
date TEXT,
|
||||
daily_requests_count INT,
|
||||
PRIMARY KEY (user_id, date)
|
||||
);
|
||||
|
||||
-- Create chats table
|
||||
CREATE TABLE IF NOT EXISTS chats(
|
||||
chat_id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
|
||||
user_id UUID REFERENCES auth.users (id),
|
||||
creation_time TIMESTAMP DEFAULT current_timestamp,
|
||||
history JSONB,
|
||||
chat_name TEXT
|
||||
);
|
||||
|
||||
|
||||
-- Create vector extension
|
||||
CREATE EXTENSION IF NOT EXISTS vector;
|
||||
|
||||
-- Create vectors table
|
||||
CREATE TABLE IF NOT EXISTS vectors (
|
||||
id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
|
||||
content TEXT,
|
||||
file_sha1 TEXT,
|
||||
metadata JSONB,
|
||||
embedding VECTOR(1536)
|
||||
);
|
||||
|
||||
-- Create function to match vectors
|
||||
CREATE OR REPLACE FUNCTION match_vectors(query_embedding VECTOR(1536), match_count INT, p_brain_id UUID)
|
||||
RETURNS TABLE(
|
||||
id UUID,
|
||||
brain_id UUID,
|
||||
content TEXT,
|
||||
metadata JSONB,
|
||||
embedding VECTOR(1536),
|
||||
similarity FLOAT
|
||||
) LANGUAGE plpgsql AS $$
|
||||
#variable_conflict use_column
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
vectors.id,
|
||||
brains_vectors.brain_id,
|
||||
vectors.content,
|
||||
vectors.metadata,
|
||||
vectors.embedding,
|
||||
1 - (vectors.embedding <=> query_embedding) AS similarity
|
||||
FROM
|
||||
vectors
|
||||
INNER JOIN
|
||||
brains_vectors ON vectors.id = brains_vectors.vector_id
|
||||
WHERE brains_vectors.brain_id = p_brain_id
|
||||
ORDER BY
|
||||
vectors.embedding <=> query_embedding
|
||||
LIMIT match_count;
|
||||
END;
|
||||
$$;
|
||||
|
||||
-- Create stats table
|
||||
CREATE TABLE IF NOT EXISTS stats (
|
||||
time TIMESTAMP,
|
||||
chat BOOLEAN,
|
||||
embedding BOOLEAN,
|
||||
details TEXT,
|
||||
metadata JSONB,
|
||||
id INTEGER PRIMARY KEY GENERATED ALWAYS AS IDENTITY
|
||||
);
|
||||
|
||||
-- Create summaries table
|
||||
CREATE TABLE IF NOT EXISTS summaries (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
document_id UUID REFERENCES vectors(id),
|
||||
content TEXT,
|
||||
metadata JSONB,
|
||||
embedding VECTOR(1536)
|
||||
);
|
||||
|
||||
-- Create function to match summaries
|
||||
CREATE OR REPLACE FUNCTION match_summaries(query_embedding VECTOR(1536), match_count INT, match_threshold FLOAT)
|
||||
RETURNS TABLE(
|
||||
id BIGINT,
|
||||
document_id UUID,
|
||||
content TEXT,
|
||||
metadata JSONB,
|
||||
embedding VECTOR(1536),
|
||||
similarity FLOAT
|
||||
) LANGUAGE plpgsql AS $$
|
||||
#variable_conflict use_column
|
||||
BEGIN
|
||||
RETURN QUERY
|
||||
SELECT
|
||||
id,
|
||||
document_id,
|
||||
content,
|
||||
metadata,
|
||||
embedding,
|
||||
1 - (summaries.embedding <=> query_embedding) AS similarity
|
||||
FROM
|
||||
summaries
|
||||
WHERE 1 - (summaries.embedding <=> query_embedding) > match_threshold
|
||||
ORDER BY
|
||||
summaries.embedding <=> query_embedding
|
||||
LIMIT match_count;
|
||||
END;
|
||||
$$;

-- Create api_keys table
CREATE TABLE IF NOT EXISTS api_keys(
  key_id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  user_id UUID REFERENCES auth.users (id),
  name TEXT DEFAULT 'API_KEY',
  days INT DEFAULT 30,
  only_chat BOOLEAN DEFAULT false,
  api_key TEXT UNIQUE,
  creation_time TIMESTAMP DEFAULT current_timestamp,
  deleted_time TIMESTAMP,
  is_active BOOLEAN DEFAULT true
);

-- Create prompts table
CREATE TABLE IF NOT EXISTS prompts (
  id UUID DEFAULT uuid_generate_v4() PRIMARY KEY,
  title VARCHAR(255),
  content TEXT,
  status VARCHAR(255) DEFAULT 'private'
);

DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'brain_type_enum') THEN
    -- Create the ENUM type 'brain_type_enum' if it doesn't exist
    CREATE TYPE brain_type_enum AS ENUM ('doc', 'api', 'composite');
  END IF;
END $$;
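
-- CREATE TYPE has no IF NOT EXISTS clause in PostgreSQL, hence the pg_type
-- check inside a DO block to keep this script idempotent.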

-- Create brains table
CREATE TABLE IF NOT EXISTS brains (
  brain_id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  name TEXT NOT NULL,
  status TEXT,
  description TEXT,
  model TEXT,
  max_tokens INT,
  temperature FLOAT,
  prompt_id UUID REFERENCES prompts(id),
  last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  brain_type brain_type_enum DEFAULT 'doc'
);

-- Create chat_history table
CREATE TABLE IF NOT EXISTS chat_history (
  message_id UUID DEFAULT uuid_generate_v4(),
  chat_id UUID REFERENCES chats(chat_id),
  user_message TEXT,
  assistant TEXT,
  message_time TIMESTAMP DEFAULT current_timestamp,
  PRIMARY KEY (chat_id, message_id),
  prompt_id UUID REFERENCES prompts(id),
  brain_id UUID REFERENCES brains(brain_id)
);

-- Create notifications table
CREATE TABLE IF NOT EXISTS notifications (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  datetime TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
  chat_id UUID REFERENCES chats(chat_id),
  message TEXT,
  action VARCHAR(255) NOT NULL,
  status VARCHAR(255) NOT NULL
);

-- Create brains X users table
CREATE TABLE IF NOT EXISTS brains_users (
  brain_id UUID,
  user_id UUID,
  rights VARCHAR(255),
  default_brain BOOLEAN DEFAULT false,
  PRIMARY KEY (brain_id, user_id),
  FOREIGN KEY (user_id) REFERENCES auth.users (id),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

-- Create brains X vectors table
CREATE TABLE IF NOT EXISTS brains_vectors (
  brain_id UUID,
  vector_id UUID,
  file_sha1 TEXT,
  PRIMARY KEY (brain_id, vector_id),
  FOREIGN KEY (vector_id) REFERENCES vectors (id),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

-- Create brain_subscription_invitations table
CREATE TABLE IF NOT EXISTS brain_subscription_invitations (
  brain_id UUID,
  email VARCHAR(255),
  rights VARCHAR(255),
  PRIMARY KEY (brain_id, email),
  FOREIGN KEY (brain_id) REFERENCES brains (brain_id)
);

-- Table for storing the relationship between brains for composite brains
CREATE TABLE IF NOT EXISTS composite_brain_connections (
  composite_brain_id UUID NOT NULL REFERENCES brains(brain_id),
  connected_brain_id UUID NOT NULL REFERENCES brains(brain_id),
  PRIMARY KEY (composite_brain_id, connected_brain_id),
  CHECK (composite_brain_id != connected_brain_id)
);

-- Create user_identity table
CREATE TABLE IF NOT EXISTS user_identity (
  user_id UUID PRIMARY KEY,
  openai_api_key VARCHAR(255)
);

-- Create api_brain_definition table
CREATE TABLE IF NOT EXISTS api_brain_definition (
  brain_id UUID REFERENCES brains(brain_id),
  method VARCHAR(255) CHECK (method IN ('GET', 'POST', 'PUT', 'DELETE')),
  url VARCHAR(255),
  params JSON,
  search_params JSON,
  secrets JSON
);

CREATE OR REPLACE FUNCTION public.get_user_email_by_user_id(user_id uuid)
RETURNS TABLE (email text)
SECURITY DEFINER
AS $$
BEGIN
  RETURN QUERY SELECT au.email::text FROM auth.users au WHERE au.id = user_id;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION public.get_user_id_by_user_email(user_email text)
RETURNS TABLE (user_id uuid)
SECURITY DEFINER
AS $$
BEGIN
  RETURN QUERY SELECT au.id::uuid FROM auth.users au WHERE au.email = user_email;
END;
$$ LANGUAGE plpgsql;
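
-- Both helpers are SECURITY DEFINER, so they run with their owner's
-- privileges and let callers resolve ids and emails from the protected
-- auth.users table without direct access to it. A sketch (hypothetical email):
--   SELECT * FROM public.get_user_id_by_user_email('user@example.com');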

CREATE TABLE IF NOT EXISTS migrations (
  name VARCHAR(255) PRIMARY KEY,
  executed_at TIMESTAMPTZ DEFAULT current_timestamp
);

CREATE TABLE IF NOT EXISTS user_settings (
  user_id UUID PRIMARY KEY,
  models JSONB DEFAULT '["gpt-3.5-turbo-1106","gpt-4"]'::jsonb,
  daily_chat_credit INT DEFAULT 300,
  max_brains INT DEFAULT 30,
  max_brain_size INT DEFAULT 100000000
);

-- knowledge table
CREATE TABLE IF NOT EXISTS knowledge (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  file_name TEXT,
  url TEXT,
  brain_id UUID NOT NULL REFERENCES brains(brain_id),
  extension TEXT NOT NULL,
  CHECK ((file_name IS NOT NULL AND url IS NULL) OR (file_name IS NULL AND url IS NOT NULL))
);

-- knowledge_vectors table
CREATE TABLE IF NOT EXISTS knowledge_vectors (
  knowledge_id UUID NOT NULL REFERENCES knowledge(id),
  vector_id UUID NOT NULL REFERENCES vectors(id),
  embedding_model TEXT NOT NULL,
  PRIMARY KEY (knowledge_id, vector_id, embedding_model)
);

-- Create the function to add user_id to the onboardings table
CREATE OR REPLACE FUNCTION public.create_user_onboarding() RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.onboardings (user_id)
  VALUES (NEW.id);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
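
-- SECURITY DEFINER here presumably ensures the trigger function can insert
-- into public.onboardings regardless of which role performs the insert on
-- auth.users.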

-- Revoke all on function create_user_onboarding() from PUBLIC
REVOKE ALL ON FUNCTION create_user_onboarding() FROM PUBLIC;

-- Drop the trigger if it exists
DROP TRIGGER IF EXISTS create_user_onboarding_trigger ON auth.users;

-- Create the trigger on the insert into the auth.users table
CREATE TRIGGER create_user_onboarding_trigger
AFTER INSERT ON auth.users
FOR EACH ROW
EXECUTE FUNCTION public.create_user_onboarding();

-- Create the onboardings table
CREATE TABLE IF NOT EXISTS onboardings (
  user_id UUID NOT NULL REFERENCES auth.users (id),
  onboarding_a BOOLEAN NOT NULL DEFAULT true,
  onboarding_b1 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b2 BOOLEAN NOT NULL DEFAULT true,
  onboarding_b3 BOOLEAN NOT NULL DEFAULT true,
  creation_time TIMESTAMP DEFAULT current_timestamp,
  PRIMARY KEY (user_id)
);

-- Stripe settings --

-- Create extension 'wrappers' if it doesn't exist
CREATE EXTENSION IF NOT EXISTS wrappers;

-- Create foreign data wrapper 'stripe_wrapper' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.foreign_data_wrappers
    WHERE foreign_data_wrapper_name = 'stripe_wrapper'
  ) THEN
    CREATE FOREIGN DATA WRAPPER stripe_wrapper
    HANDLER stripe_fdw_handler;
  END IF;
END $$;

-- Check if the server 'stripe_server' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_foreign_server WHERE srvname = 'stripe_server') THEN
    CREATE SERVER stripe_server
    FOREIGN DATA WRAPPER stripe_wrapper
    OPTIONS (
      api_key 'your_stripe_api_key' -- Replace with your Stripe API key
    );
  END IF;
END $$;

-- Create foreign table 'public.customers' if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1
    FROM information_schema.tables
    WHERE table_name = 'customers'
  ) THEN
    CREATE FOREIGN TABLE public.customers (
      id text,
      email text,
      name text,
      description text,
      created timestamp,
      attrs jsonb
    )
    SERVER stripe_server
    OPTIONS (
      OBJECT 'customers',
      ROWID_COLUMN 'id'
    );
  END IF;
END $$;
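
-- The 'wrappers' extension's stripe_fdw_handler backs foreign tables that
-- query the Stripe API on demand, so selecting from this table reads live
-- Stripe data. A sketch of a lookup (hypothetical email):
--   SELECT id, name FROM public.customers WHERE email = 'user@example.com';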

-- Create table 'users' if it doesn't exist
CREATE TABLE IF NOT EXISTS public.users (
  id uuid REFERENCES auth.users NOT NULL PRIMARY KEY,
  email text
);

-- Create or replace function 'public.handle_new_user'
CREATE OR REPLACE FUNCTION public.handle_new_user()
RETURNS TRIGGER AS $$
BEGIN
  INSERT INTO public.users (id, email)
  VALUES (NEW.id, NEW.email);
  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Check if the trigger 'on_auth_user_created' exists before creating it
DO $$
BEGIN
  IF NOT EXISTS (SELECT 1 FROM pg_trigger WHERE tgname = 'on_auth_user_created') THEN
    CREATE TRIGGER on_auth_user_created
    AFTER INSERT ON auth.users
    FOR EACH ROW EXECUTE FUNCTION public.handle_new_user();
  END IF;
END $$;

insert into
  storage.buckets (id, name)
values
  ('quivr', 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_0" ON storage.objects FOR INSERT TO anon WITH CHECK (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_1" ON storage.objects FOR SELECT TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_2" ON storage.objects FOR UPDATE TO anon USING (bucket_id = 'quivr');

CREATE POLICY "Access Quivr Storage 1jccrwz_3" ON storage.objects FOR DELETE TO anon USING (bucket_id = 'quivr');

-- Create functions for secrets in vault
create or replace function insert_secret(name text, secret text)
returns uuid
language plpgsql
security definer
set search_path = public
as $$
begin
  return vault.create_secret(secret, name);
end;
$$;
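
-- These wrap Supabase Vault so secrets (for example API-brain credentials)
-- are stored encrypted rather than in plain table columns. A usage sketch
-- (hypothetical secret name and value):
--   SELECT insert_secret('my_secret_name', 's3cr3t-value');
--   SELECT read_secret('my_secret_name');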

create or replace function read_secret(secret_name text)
returns text
language plpgsql
security definer set search_path = public
as $$
declare
  secret text;
begin
  select decrypted_secret from vault.decrypted_secrets where name = secret_name into secret;
  return secret;
end;
$$;

create or replace function delete_secret(secret_name text)
returns text
language plpgsql
security definer set search_path = public
as $$
declare
  deleted_rows int;
begin
  delete from vault.decrypted_secrets where name = secret_name;
  get diagnostics deleted_rows = row_count;
  if deleted_rows = 0 then
    return false;
  else
    return true;
  end if;
end;
$$;

create schema if not exists extensions;

create table if not exists
  extensions.wrappers_fdw_stats ();

grant all on extensions.wrappers_fdw_stats to service_role;

INSERT INTO migrations (name)
SELECT '20231205163000_new_table_composite_brain_connections'
WHERE NOT EXISTS (
  SELECT 1 FROM migrations WHERE name = '20231205163000_new_table_composite_brain_connections'
);
5
supabase/migrations/local_20240107152745_ollama.sql
Normal file
@ -0,0 +1,5 @@
alter table "public"."vectors" alter column "embedding" set not null;

alter table "public"."vectors" alter column "embedding" set data type vector using "embedding"::vector;
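
-- Dropping the fixed vector(1536) type for a dimensionless vector lets the
-- column hold embeddings of any size, presumably so Ollama models whose
-- embeddings are not 1536-dimensional can share the table.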
@ -232,7 +232,8 @@ INSERT INTO "public"."chat_history" ("message_id", "chat_id", "user_message", "a
--

INSERT INTO "public"."models" ("name", "price", "max_input", "max_output") VALUES
('gpt-3.5-turbo-1106', 1, 2000, 1000);
('gpt-3.5-turbo-1106', 1, 2000, 1000),
('ollama/llama2', 1, 2000, 1000);

--