feat(service): update Supabase to current latest versions (#8316)

This commit is contained in:
Andras Bacsai 2026-04-05 20:21:05 +02:00 committed by GitHub
commit bebbf16e43
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@ -8,33 +8,77 @@
services:
supabase-kong:
image: kong:2.8.1
# https://unix.stackexchange.com/a/294837
entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
image: kong/kong:3.9.1
entrypoint: /home/kong/kong-entrypoint.sh
depends_on:
supabase-analytics:
condition: service_healthy
healthcheck:
test: ["CMD", "kong", "health"]
interval: 5s
timeout: 5s
retries: 5
environment:
- SERVICE_URL_SUPABASEKONG_8000
- KONG_PORT_MAPS=443:8000
- JWT_SECRET=${SERVICE_PASSWORD_JWT}
- KONG_DATABASE=off
- KONG_DECLARATIVE_CONFIG=/home/kong/kong.yml
- KONG_DECLARATIVE_CONFIG=/usr/local/kong/kong.yml
# https://github.com/supabase/cli/issues/14
- KONG_DNS_ORDER=LAST,A,CNAME
- KONG_PLUGINS=request-transformer,cors,key-auth,acl,basic-auth
- KONG_DNS_NOT_FOUND_TTL=1
- KONG_PLUGINS=request-transformer,cors,key-auth,acl,basic-auth,request-termination,ip-restriction,post-function
- KONG_NGINX_PROXY_PROXY_BUFFER_SIZE=160k
- KONG_NGINX_PROXY_PROXY_BUFFERS=64 160k
- 'KONG_PROXY_ACCESS_LOG=/dev/stdout combined'
- SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
- SUPABASE_SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
- SUPABASE_PUBLISHABLE_KEY=${SUPABASE_PUBLISHABLE_KEY:-}
- SUPABASE_SECRET_KEY=${SUPABASE_SECRET_KEY:-}
- ANON_KEY_ASYMMETRIC=${ANON_KEY_ASYMMETRIC:-}
- SERVICE_ROLE_KEY_ASYMMETRIC=${SERVICE_ROLE_KEY_ASYMMETRIC:-}
- DASHBOARD_USERNAME=${SERVICE_USER_ADMIN}
- DASHBOARD_PASSWORD=${SERVICE_PASSWORD_ADMIN}
- 'KONG_STORAGE_CONNECT_TIMEOUT=${KONG_STORAGE_CONNECT_TIMEOUT:-60}'
- 'KONG_STORAGE_WRITE_TIMEOUT=${KONG_STORAGE_WRITE_TIMEOUT:-3600}'
- 'KONG_STORAGE_READ_TIMEOUT=${KONG_STORAGE_READ_TIMEOUT:-3600}'
- 'KONG_STORAGE_REQUEST_BUFFERING=${KONG_STORAGE_REQUEST_BUFFERING:-false}'
- 'KONG_STORAGE_RESPONSE_BUFFERING=${KONG_STORAGE_RESPONSE_BUFFERING:-false}'
volumes:
- type: bind
source: ./volumes/api/kong-entrypoint.sh
target: /home/kong/kong-entrypoint.sh
content: |
#!/bin/bash
# Custom entrypoint for Kong that builds Lua expressions for request-transformer
# and performs environment variable substitution in the declarative config.
if [ -n "$SUPABASE_SECRET_KEY" ] && [ -n "$SUPABASE_PUBLISHABLE_KEY" ]; then
export LUA_AUTH_EXPR="\$((headers.authorization ~= nil and headers.authorization:sub(1, 10) ~= 'Bearer sb_' and headers.authorization) or (headers.apikey == '$SUPABASE_SECRET_KEY' and 'Bearer $SERVICE_ROLE_KEY_ASYMMETRIC') or (headers.apikey == '$SUPABASE_PUBLISHABLE_KEY' and 'Bearer $ANON_KEY_ASYMMETRIC') or headers.apikey)"
export LUA_RT_WS_EXPR="\$((query_params.apikey == '$SUPABASE_SECRET_KEY' and '$SERVICE_ROLE_KEY_ASYMMETRIC') or (query_params.apikey == '$SUPABASE_PUBLISHABLE_KEY' and '$ANON_KEY_ASYMMETRIC') or query_params.apikey)"
else
export LUA_AUTH_EXPR="\$((headers.authorization ~= nil and headers.authorization:sub(1, 10) ~= 'Bearer sb_' and headers.authorization) or headers.apikey)"
export LUA_RT_WS_EXPR="\$(query_params.apikey)"
fi
awk '{
result = ""
rest = $0
while (match(rest, /\$[A-Za-z_][A-Za-z_0-9]*/)) {
varname = substr(rest, RSTART + 1, RLENGTH - 1)
if (varname in ENVIRON) {
result = result substr(rest, 1, RSTART - 1) ENVIRON[varname]
} else {
result = result substr(rest, 1, RSTART + RLENGTH - 1)
}
rest = substr(rest, RSTART + RLENGTH)
}
print result rest
}' /home/kong/temp.yml > "$KONG_DECLARATIVE_CONFIG"
sed -i '/^[[:space:]]*- key:[[:space:]]*$/d' "$KONG_DECLARATIVE_CONFIG"
exec /entrypoint.sh kong docker-start
# https://github.com/supabase/supabase/issues/12661
- type: bind
source: ./volumes/api/kong.yml
@ -51,9 +95,11 @@ services:
- username: anon
keyauth_credentials:
- key: $SUPABASE_ANON_KEY
- key: $SUPABASE_PUBLISHABLE_KEY
- username: service_role
keyauth_credentials:
- key: $SUPABASE_SERVICE_KEY
- key: $SUPABASE_SECRET_KEY
###
### Access Control List
@ -69,8 +115,8 @@ services:
###
basicauth_credentials:
- consumer: DASHBOARD
username: $DASHBOARD_USERNAME
password: $DASHBOARD_PASSWORD
username: '$DASHBOARD_USERNAME'
password: '$DASHBOARD_PASSWORD'
###
@ -106,6 +152,36 @@ services:
- /auth/v1/authorize
plugins:
- name: cors
- name: auth-v1-open-jwks
_comment: 'Auth: /auth/v1/.well-known/jwks.json -> http://supabase-auth:9999/.well-known/jwks.json'
url: http://supabase-auth:9999/.well-known/jwks.json
routes:
- name: auth-v1-open-jwks
strip_path: true
paths:
- /auth/v1/.well-known/jwks.json
plugins:
- name: cors
- name: auth-v1-open-sso-acs
url: "http://supabase-auth:9999/sso/saml/acs"
routes:
- name: auth-v1-open-sso-acs
strip_path: true
paths:
- /sso/saml/acs
plugins:
- name: cors
- name: auth-v1-open-sso-metadata
url: "http://supabase-auth:9999/sso/saml/metadata"
routes:
- name: auth-v1-open-sso-metadata
strip_path: true
paths:
- /sso/saml/metadata
plugins:
- name: cors
## Secure Auth routes
- name: auth-v1
@ -121,6 +197,14 @@ services:
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
@ -141,7 +225,15 @@ services:
- name: cors
- name: key-auth
config:
hide_credentials: true
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
@ -162,12 +254,17 @@ services:
- name: cors
- name: key-auth
config:
hide_credentials: true
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- Content-Profile:graphql_public
- "Content-Profile: graphql_public"
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Content-Profile: graphql_public"
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
@ -190,6 +287,14 @@ services:
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
querystring:
- "apikey: $LUA_RT_WS_EXPR"
replace:
querystring:
- "apikey: $LUA_RT_WS_EXPR"
- name: acl
config:
hide_groups_header: true
@ -197,7 +302,7 @@ services:
- admin
- anon
- name: realtime-v1-rest
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
_comment: 'Realtime: /realtime/v1/api/* -> http://realtime-dev:4000/api/*'
url: http://realtime-dev:4000/api
protocol: http
routes:
@ -210,6 +315,14 @@ services:
- name: key-auth
config:
hide_credentials: false
- name: request-transformer
config:
add:
headers:
- "Authorization: $LUA_AUTH_EXPR"
replace:
headers:
- "Authorization: $LUA_AUTH_EXPR"
- name: acl
config:
hide_groups_header: true
@ -217,7 +330,8 @@ services:
- admin
- anon
## Storage routes: the storage server manages its own auth
## Storage API endpoint
## No key-auth - S3 protocol requests don't carry an apikey header.
- name: storage-v1
_comment: 'Storage: /storage/v1/* -> http://supabase-storage:5000/*'
connect_timeout: $KONG_STORAGE_CONNECT_TIMEOUT
@ -233,11 +347,20 @@ services:
response_buffering: $KONG_STORAGE_RESPONSE_BUFFERING
plugins:
- name: cors
- name: post-function
config:
access:
- |
local auth = kong.request.get_header("authorization")
if auth == nil or auth == "" or auth:find("^%s*$") then
kong.service.request.clear_header("authorization")
end
## Edge Functions routes
- name: functions-v1
_comment: 'Edge Functions: /functions/v1/* -> http://supabase-edge-functions:9000/*'
url: http://supabase-edge-functions:9000/
read_timeout: 150000
routes:
- name: functions-v1-all
strip_path: true
@ -246,15 +369,28 @@ services:
plugins:
- name: cors
## Analytics routes
- name: analytics-v1
_comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
url: http://supabase-analytics:4000/
## OAuth 2.0 Authorization Server Metadata (RFC 8414)
- name: well-known-oauth
_comment: 'Auth: /.well-known/oauth-authorization-server -> http://supabase-auth:9999/.well-known/oauth-authorization-server'
url: http://supabase-auth:9999/.well-known/oauth-authorization-server
routes:
- name: analytics-v1-all
- name: well-known-oauth
strip_path: true
paths:
- /analytics/v1/
- /.well-known/oauth-authorization-server
plugins:
- name: cors
## Analytics routes
## Not used - Studio and Vector talk directly to analytics via Docker networking.
# - name: analytics-v1
#   _comment: 'Analytics: /analytics/v1/* -> http://supabase-analytics:4000/*'
# url: http://supabase-analytics:4000/
# routes:
# - name: analytics-v1-all
# strip_path: true
# paths:
# - /analytics/v1/
## Secure Database routes
- name: meta
@ -275,6 +411,48 @@ services:
allow:
- admin
## Block access to /api/mcp
- name: mcp-blocker
_comment: 'Block direct access to /api/mcp'
url: http://supabase-studio:3000/api/mcp
routes:
- name: mcp-blocker-route
strip_path: true
paths:
- /api/mcp
plugins:
- name: request-termination
config:
status_code: 403
message: "Access is forbidden."
## MCP endpoint - local access
- name: mcp
_comment: 'MCP: /mcp -> http://supabase-studio:3000/api/mcp (local access)'
url: http://supabase-studio:3000/api/mcp
routes:
- name: mcp
strip_path: true
paths:
- /mcp
plugins:
# Block access to /mcp by default
- name: request-termination
config:
status_code: 403
message: "Access is forbidden."
# Enable local access (danger zone!)
# 1. Comment out the 'request-termination' section above
# 2. Uncomment the entire section below, including 'deny'
# 3. Add your local IPs to the 'allow' list
#- name: cors
#- name: ip-restriction
# config:
# allow:
# - 127.0.0.1
# - ::1
# deny: []
## Protected Dashboard - catch all remaining routes
- name: dashboard
_comment: 'Studio: /* -> http://supabase-studio:3000/*'
@ -290,7 +468,7 @@ services:
config:
hide_credentials: true
supabase-studio:
image: supabase/studio:2026.01.07-sha-037e5f9
image: supabase/studio:2026.03.16-sha-5528817
healthcheck:
test:
[
@ -310,7 +488,11 @@ services:
- STUDIO_PG_META_URL=http://supabase-meta:8080
- POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
- POSTGRES_HOST=${POSTGRES_HOST:-supabase-db}
- CURRENT_CLI_VERSION=2.67.1
- POSTGRES_PORT=${POSTGRES_PORT:-5432}
- POSTGRES_DB=${POSTGRES_DB:-postgres}
- 'PGRST_DB_SCHEMAS=${PGRST_DB_SCHEMAS:-public,storage,graphql_public}'
- PGRST_DB_MAX_ROWS=${PGRST_DB_MAX_ROWS:-1000}
- PGRST_DB_EXTRA_SEARCH_PATH=${PGRST_DB_EXTRA_SEARCH_PATH:-public}
- DEFAULT_ORGANIZATION_NAME=${STUDIO_DEFAULT_ORGANIZATION:-Default Organization}
- DEFAULT_PROJECT_NAME=${STUDIO_DEFAULT_PROJECT:-Default Project}
@ -320,10 +502,12 @@ services:
- SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
- SUPABASE_SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
- AUTH_JWT_SECRET=${SERVICE_PASSWORD_JWT}
- PG_META_CRYPTO_KEY=${SERVICE_PASSWORD_PGMETACRYPTO}
- LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
- LOGFLARE_PUBLIC_ACCESS_TOKEN=${SERVICE_PASSWORD_LOGFLARE}
- LOGFLARE_PRIVATE_ACCESS_TOKEN=${SERVICE_PASSWORD_LOGFLAREPRIVATE}
- LOGFLARE_URL=http://supabase-analytics:4000
- 'SUPABASE_PUBLIC_API=${SERVICE_URL_SUPABASEKONG}'
# Next.js client-side environment variables (required for browser access)
- 'NEXT_PUBLIC_SUPABASE_URL=${SERVICE_URL_SUPABASEKONG}'
- NEXT_PUBLIC_SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
@ -333,8 +517,13 @@ services:
# Uncomment to use Big Query backend for analytics
# NEXT_ANALYTICS_BACKEND_PROVIDER=bigquery
- 'OPENAI_API_KEY=${OPENAI_API_KEY}'
- SNIPPETS_MANAGEMENT_FOLDER=/app/snippets
- EDGE_FUNCTIONS_MANAGEMENT_FOLDER=/app/edge-functions
volumes:
- ./volumes/snippets:/app/snippets
- ./volumes/functions:/app/edge-functions
supabase-db:
image: supabase/postgres:15.8.1.048
image: supabase/postgres:15.8.1.085
healthcheck:
test: pg_isready -U postgres -h 127.0.0.1
interval: 5s
@ -365,7 +554,7 @@ services:
source: ./volumes/db/realtime.sql
target: /docker-entrypoint-initdb.d/migrations/99-realtime.sql
content: |
\set pguser `echo "supabase_admin"`
\set pguser `echo "$POSTGRES_USER"`
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
@ -380,7 +569,7 @@ services:
source: ./volumes/db/pooler.sql
target: /docker-entrypoint-initdb.d/migrations/99-pooler.sql
content: |
\set pguser `echo "supabase_admin"`
\set pguser `echo "$POSTGRES_USER"`
\c _supabase
create schema if not exists _supavisor;
alter schema _supavisor owner to :pguser;
@ -624,7 +813,7 @@ services:
source: ./volumes/db/logs.sql
target: /docker-entrypoint-initdb.d/migrations/99-logs.sql
content: |
\set pguser `echo "supabase_admin"`
\set pguser `echo "$POSTGRES_USER"`
\c _supabase
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
@ -633,7 +822,7 @@ services:
- supabase-db-config:/etc/postgresql-custom
supabase-analytics:
image: supabase/logflare:1.4.0
image: supabase/logflare:1.31.2
healthcheck:
test: ["CMD", "curl", "http://127.0.0.1:4000/health"]
timeout: 5s
@ -655,11 +844,10 @@ services:
- DB_PORT=${POSTGRES_PORT:-5432}
- DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
- DB_SCHEMA=_analytics
- LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
- LOGFLARE_PUBLIC_ACCESS_TOKEN=${SERVICE_PASSWORD_LOGFLARE}
- LOGFLARE_PRIVATE_ACCESS_TOKEN=${SERVICE_PASSWORD_LOGFLAREPRIVATE}
- LOGFLARE_SINGLE_TENANT=true
- LOGFLARE_SINGLE_TENANT_MODE=true
- LOGFLARE_SUPABASE_MODE=true
- LOGFLARE_MIN_CLUSTER_SIZE=1
# Comment variables to use Big Query backend for analytics
- POSTGRES_BACKEND_URL=postgresql://supabase_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOSTNAME:-supabase-db}:${POSTGRES_PORT:-5432}/_supabase
@ -670,7 +858,7 @@ services:
# GOOGLE_PROJECT_ID=${GOOGLE_PROJECT_ID}
# GOOGLE_PROJECT_NUMBER=${GOOGLE_PROJECT_NUMBER}
supabase-vector:
image: timberio/vector:0.28.1-alpine
image: timberio/vector:0.53.0-alpine
healthcheck:
test:
[
@ -722,13 +910,13 @@ services:
inputs:
- project_logs
route:
kong: 'starts_with(string!(.appname), "supabase-kong")'
auth: 'starts_with(string!(.appname), "supabase-auth")'
rest: 'starts_with(string!(.appname), "supabase-rest")'
realtime: 'starts_with(string!(.appname), "realtime-dev")'
storage: 'starts_with(string!(.appname), "supabase-storage")'
functions: 'starts_with(string!(.appname), "supabase-functions")'
db: 'starts_with(string!(.appname), "supabase-db")'
kong: 'contains(string!(.appname), "supabase-kong")'
auth: 'contains(string!(.appname), "supabase-auth")'
rest: 'contains(string!(.appname), "supabase-rest")'
realtime: 'contains(string!(.appname), "supabase-realtime")'
storage: 'contains(string!(.appname), "supabase-storage")'
functions: 'contains(string!(.appname), "supabase-edge-functions")'
db: 'contains(string!(.appname), "supabase-db")'
# Ignores non nginx errors since they are related with kong booting up
kong_logs:
type: remap
@ -741,10 +929,13 @@ services:
.metadata.request.headers.referer = req.referer
.metadata.request.headers.user_agent = req.agent
.metadata.request.headers.cf_connecting_ip = req.client
.metadata.request.method = req.method
.metadata.request.path = req.path
.metadata.request.protocol = req.protocol
.metadata.response.status_code = req.status
url, split_err = split(req.request, " ")
if split_err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
@ -793,14 +984,20 @@ services:
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.timestamp = to_timestamp!(parsed.time)
.timestamp = parse_timestamp!(value: parsed.time, format: "%d/%b/%Y:%H:%M:%S %z")
.metadata.host = .project
}
# Filter out healthcheck logs from Realtime
realtime_logs_filtered:
type: filter
inputs:
- router.realtime
condition: '!contains(string!(.event_message), "/health")'
# Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
realtime_logs:
type: remap
inputs:
- router.realtime
- realtime_logs_filtered
source: |-
.metadata.project = del(.project)
.metadata.external_id = .metadata.project
@ -825,6 +1022,13 @@ services:
.metadata.context[0].host = parsed.hostname
.metadata.context[0].pid = parsed.pid
}
# Function logs are unstructured messages on stderr
functions_logs:
type: remap
inputs:
- router.functions
source: |-
.metadata.project_ref = del(.project)
# Postgres logs some messages to stderr which we map to warning severity level
db_logs:
type: remap
@ -839,8 +1043,8 @@ services:
if err != null || parsed == null {
.metadata.parsed.error_severity = "info"
}
if parsed != null {
.metadata.parsed.error_severity = parsed.level
if parsed.level != null {
.metadata.parsed.error_severity = parsed.level
}
if .metadata.parsed.error_severity == "info" {
.metadata.parsed.error_severity = "log"
@ -856,8 +1060,11 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=gotrue.logs.prod'
logflare_realtime:
type: 'http'
inputs:
@ -866,8 +1073,11 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=realtime.logs.prod'
logflare_rest:
type: 'http'
inputs:
@ -876,8 +1086,11 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=postgREST.logs.prod'
logflare_db:
type: 'http'
inputs:
@ -886,21 +1099,24 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
# lead to broken queries from studio. This works by the assumption that containers are started in the
# following order: vector > db > logflare > kong
uri: 'http://supabase-kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=postgres.logs'
logflare_functions:
type: 'http'
inputs:
- router.functions
- functions_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=deno-relay-logs'
logflare_storage:
type: 'http'
inputs:
@ -909,8 +1125,11 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=storage.logs.prod.2'
logflare_kong:
type: 'http'
inputs:
@ -920,16 +1139,19 @@ services:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://supabase-analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
retry_max_duration_secs: 30
retry_initial_backoff_secs: 1
headers:
x-api-key: '${LOGFLARE_PUBLIC_ACCESS_TOKEN?LOGFLARE_PUBLIC_ACCESS_TOKEN is required}'
uri: 'http://supabase-analytics:4000/api/logs?source_name=cloudflare.logs.prod'
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
command: ["--config", "etc/vector/vector.yml"]
- LOGFLARE_PUBLIC_ACCESS_TOKEN=${SERVICE_PASSWORD_LOGFLARE}
command: ["--config", "/etc/vector/vector.yml"]
supabase-rest:
image: postgrest/postgrest:v12.2.12
image: postgrest/postgrest:v14.6
depends_on:
supabase-db:
# Disable this if you are using an external Postgres database
@ -939,6 +1161,8 @@ services:
environment:
- PGRST_DB_URI=postgres://authenticator:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOSTNAME:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
- 'PGRST_DB_SCHEMAS=${PGRST_DB_SCHEMAS:-public,storage,graphql_public}'
- PGRST_DB_MAX_ROWS=${PGRST_DB_MAX_ROWS:-1000}
- PGRST_DB_EXTRA_SEARCH_PATH=${PGRST_DB_EXTRA_SEARCH_PATH:-public}
- PGRST_DB_ANON_ROLE=anon
- PGRST_JWT_SECRET=${SERVICE_PASSWORD_JWT}
- PGRST_DB_USE_LEGACY_GUCS=false
@ -947,7 +1171,7 @@ services:
command: "postgrest"
exclude_from_hc: true
supabase-auth:
image: supabase/gotrue:v2.174.0
image: supabase/gotrue:v2.186.0
depends_on:
supabase-db:
# Disable this if you are using an external Postgres database
@ -1038,7 +1262,7 @@ services:
realtime-dev:
# This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
image: supabase/realtime:v2.34.47
image: supabase/realtime:v2.76.5
container_name: realtime-dev.supabase-realtime
depends_on:
supabase-db:
@ -1062,6 +1286,7 @@ services:
timeout: 5s
interval: 5s
retries: 3
start_period: 10s
environment:
- PORT=4000
- DB_HOST=${POSTGRES_HOSTNAME:-supabase-db}
@ -1072,11 +1297,9 @@ services:
- DB_AFTER_CONNECT_QUERY=SET search_path TO _realtime
- DB_ENC_KEY=supabaserealtime
- API_JWT_SECRET=${SERVICE_PASSWORD_JWT}
- FLY_ALLOC_ID=fly123
- FLY_APP_NAME=realtime
- SECRET_KEY_BASE=${SECRET_PASSWORD_REALTIME}
- METRICS_JWT_SECRET=${SERVICE_PASSWORD_JWT}
- ERL_AFLAGS=-proto_dist inet_tcp
- ENABLE_TAILSCALE=false
- DNS_NODES=''
- RLIMIT_NOFILE=10000
- APP_NAME=realtime
@ -1084,6 +1307,7 @@ services:
- LOG_LEVEL=error
- RUN_JANITOR=true
- JANITOR_INTERVAL=60000
- DISABLE_HEALTHCHECK_LOGGING=true
command: >
sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"
supabase-minio:
@ -1121,7 +1345,7 @@ services:
exit 0
supabase-storage:
image: supabase/storage-api:v1.14.6
image: supabase/storage-api:v1.44.2
depends_on:
supabase-db:
# Disable this if you are using an external Postgres database
@ -1160,7 +1384,7 @@ services:
- UPLOAD_FILE_SIZE_LIMIT=524288000
- UPLOAD_FILE_SIZE_LIMIT_STANDARD=524288000
- UPLOAD_SIGNED_URL_EXPIRATION_TIME=120
- TUS_URL_PATH=upload/resumable
- TUS_URL_PATH=/upload/resumable
- TUS_MAX_SIZE=3600000
- ENABLE_IMAGE_TRANSFORMATION=true
- IMGPROXY_URL=http://imgproxy:8080
@ -1168,46 +1392,32 @@ services:
- DATABASE_SEARCH_PATH=storage
- NODE_ENV=production
- REQUEST_ALLOW_X_FORWARDED_PATH=true
# - ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
# - SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4
# - POSTGREST_URL=http://supabase-rest:3000
# - PGRST_JWT_SECRET=${SERVICE_PASSWORD_JWT}
# - DATABASE_URL=postgres://supabase_storage_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOSTNAME:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
# - FILE_SIZE_LIMIT=52428800
# - STORAGE_BACKEND=s3
# - STORAGE_S3_BUCKET=stub
# - STORAGE_S3_ENDPOINT=http://supabase-minio:9000
# - STORAGE_S3_PROTOCOL=http
# - STORAGE_S3_REGION=stub
# - STORAGE_S3_FORCE_PATH_STYLE=true
# - AWS_ACCESS_KEY_ID=${SERVICE_USER_MINIO}
# - AWS_SECRET_ACCESS_KEY=${SERVICE_PASSWORD_MINIO}
# - AWS_DEFAULT_REGION=stub
# - FILE_STORAGE_BACKEND_PATH=/var/lib/storage
# - TENANT_ID=stub
# # TODO: https://github.com/supabase/storage-api/issues/55
# - REGION=stub
# - ENABLE_IMAGE_TRANSFORMATION=true
# - IMGPROXY_URL=http://imgproxy:8080
- ANON_KEY=${SERVICE_SUPABASEANON_KEY}
- SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
- POSTGREST_URL=http://supabase-rest:3000
- PGRST_JWT_SECRET=${SERVICE_PASSWORD_JWT}
- STORAGE_PUBLIC_URL=${SERVICE_URL_SUPABASEKONG}
- TENANT_ID=${STORAGE_TENANT_ID:-storage-single-tenant}
volumes:
- ./volumes/storage:/var/lib/storage
imgproxy:
image: darthsim/imgproxy:v3.8.0
image: darthsim/imgproxy:v3.30.1
healthcheck:
test: ["CMD", "imgproxy", "health"]
timeout: 5s
interval: 5s
retries: 3
environment:
- IMGPROXY_BIND=:8080
- IMGPROXY_LOCAL_FILESYSTEM_ROOT=/
- IMGPROXY_USE_ETAG=true
- IMGPROXY_ENABLE_WEBP_DETECTION=${IMGPROXY_ENABLE_WEBP_DETECTION:-true}
- IMGPROXY_AUTO_WEBP=${IMGPROXY_AUTO_WEBP:-true}
- IMGPROXY_MAX_SRC_RESOLUTION=16.8
volumes:
- ./volumes/storage:/var/lib/storage
supabase-meta:
image: supabase/postgres-meta:v0.89.3
image: supabase/postgres-meta:v0.95.2
depends_on:
supabase-db:
# Disable this if you are using an external Postgres database
@ -1221,9 +1431,10 @@ services:
- PG_META_DB_NAME=${POSTGRES_DB:-postgres}
- PG_META_DB_USER=supabase_admin
- PG_META_DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
- CRYPTO_KEY=${SERVICE_PASSWORD_PGMETACRYPTO}
supabase-edge-functions:
image: supabase/edge-runtime:v1.67.4
image: supabase/edge-runtime:v1.71.2
depends_on:
supabase-analytics:
condition: service_healthy
@ -1234,26 +1445,40 @@ services:
retries: 3
environment:
- JWT_SECRET=${SERVICE_PASSWORD_JWT}
- SUPABASE_URL=${SERVICE_URL_SUPABASEKONG}
- SUPABASE_URL=http://supabase-kong:8000
- SUPABASE_PUBLIC_URL=${SERVICE_URL_SUPABASEKONG}
- SUPABASE_ANON_KEY=${SERVICE_SUPABASEANON_KEY}
- SUPABASE_SERVICE_ROLE_KEY=${SERVICE_SUPABASESERVICE_KEY}
- SUPABASE_DB_URL=postgresql://postgres:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOSTNAME:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-postgres}
# TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
# TODO: Allow configuring VERIFY_JWT per function.
- VERIFY_JWT=${FUNCTIONS_VERIFY_JWT:-false}
volumes:
- ./volumes/functions:/home/deno/functions
- deno-cache:/root/.cache/deno
- type: bind
source: ./volumes/functions/main/index.ts
target: /home/deno/functions/main/index.ts
content: |
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
console.log('main function started')
const JWT_SECRET = Deno.env.get('JWT_SECRET')
const SUPABASE_URL = Deno.env.get('SUPABASE_URL')
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
// Create JWKS for ES256/RS256 tokens (newer tokens)
let SUPABASE_JWT_KEYS: ReturnType<typeof jose.createRemoteJWKSet> | null = null
if (SUPABASE_URL) {
try {
SUPABASE_JWT_KEYS = jose.createRemoteJWKSet(
new URL('/auth/v1/.well-known/jwks.json', SUPABASE_URL)
)
} catch (e) {
console.error('Failed to fetch JWKS from SUPABASE_URL:', e)
}
}
function getAuthToken(req: Request) {
const authHeader = req.headers.get('authorization')
if (!authHeader) {
@ -1266,23 +1491,61 @@ services:
return token
}
async function verifyJWT(jwt: string): Promise<boolean> {
const encoder = new TextEncoder()
const secretKey = encoder.encode(JWT_SECRET)
try {
await jose.jwtVerify(jwt, secretKey)
} catch (err) {
console.error(err)
async function isValidLegacyJWT(jwt: string): Promise<boolean> {
if (!JWT_SECRET) {
console.error('JWT_SECRET not available for HS256 token verification')
return false
}
return true
const encoder = new TextEncoder();
const secretKey = encoder.encode(JWT_SECRET)
try {
await jose.jwtVerify(jwt, secretKey);
} catch (e) {
console.error('Symmetric Legacy JWT verification error', e);
return false;
}
return true;
}
serve(async (req: Request) => {
async function isValidJWT(jwt: string): Promise<boolean> {
if (!SUPABASE_JWT_KEYS) {
console.error('JWKS not available for ES256/RS256 token verification')
return false
}
try {
await jose.jwtVerify(jwt, SUPABASE_JWT_KEYS)
} catch (e) {
console.error('Asymmetric JWT verification error', e);
return false
}
return true;
}
async function isValidHybridJWT(jwt: string): Promise<boolean> {
const { alg: jwtAlgorithm } = jose.decodeProtectedHeader(jwt)
if (jwtAlgorithm === 'HS256') {
console.log(`Legacy token type detected, attempting ${jwtAlgorithm} verification.`)
return await isValidLegacyJWT(jwt)
}
if (jwtAlgorithm === 'ES256' || jwtAlgorithm === 'RS256') {
return await isValidJWT(jwt)
}
return false;
}
Deno.serve(async (req: Request) => {
if (req.method !== 'OPTIONS' && VERIFY_JWT) {
try {
const token = getAuthToken(req)
const isValidJWT = await verifyJWT(token)
const isValidJWT = await isValidHybridJWT(token);
if (!isValidJWT) {
return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
@ -1348,9 +1611,7 @@ services:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.
import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
serve(async () => {
Deno.serve(async () => {
return new Response(
`"Hello from Edge Functions!"`,
{ headers: { "Content-Type": "application/json" } },
@ -1367,7 +1628,7 @@ services:
- /home/deno/functions/main
supabase-supavisor:
image: 'supabase/supavisor:2.5.1'
image: 'supabase/supavisor:2.7.4'
healthcheck:
test:
- CMD
@ -1379,13 +1640,14 @@ services:
timeout: 5s
interval: 5s
retries: 10
start_period: 30s
depends_on:
supabase-db:
condition: service_healthy
supabase-analytics:
condition: service_healthy
environment:
- POOLER_TENANT_ID=dev_tenant
- POOLER_TENANT_ID=${POOLER_TENANT_ID:-dev_tenant}
- POOLER_POOL_MODE=transaction
- POOLER_DEFAULT_POOL_SIZE=${POOLER_DEFAULT_POOL_SIZE:-20}
- POOLER_MAX_CLIENT_CONN=${POOLER_MAX_CLIENT_CONN:-100}
@ -1402,10 +1664,20 @@ services:
- 'METRICS_JWT_SECRET=${SERVICE_PASSWORD_JWT}'
- REGION=local
- 'ERL_AFLAGS=-proto_dist inet_tcp'
- 'DB_POOL_SIZE=${POOLER_DB_POOL_SIZE:-5}'
# TLS for downstream connections (fixes Supabase CLI TLS requirement)
- GLOBAL_DOWNSTREAM_CERT_PATH=/etc/ssl/server.crt
- GLOBAL_DOWNSTREAM_KEY_PATH=/etc/ssl/server.key
command:
- /bin/sh
- "-c"
- '/app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server'
- |
if [ ! -f /etc/ssl/server.crt ]; then
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
-keyout /etc/ssl/server.key -out /etc/ssl/server.crt \
-subj "/CN=supabase-pooler"
fi
/app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server
volumes:
- type: bind
source: ./volumes/pooler/pooler.exs