diff --git a/app/Http/Controllers/Api/ApplicationsController.php b/app/Http/Controllers/Api/ApplicationsController.php
index 799a622db..1e045ff5a 100644
--- a/app/Http/Controllers/Api/ApplicationsController.php
+++ b/app/Http/Controllers/Api/ApplicationsController.php
@@ -20,6 +20,7 @@
use App\Services\DockerImageParser;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Validator;
+use Illuminate\Support\Facades\Http;
use Illuminate\Validation\Rule;
use OpenApi\Attributes as OA;
use Spatie\Url\Url;
@@ -1344,24 +1345,28 @@ private function create_application(Request $request, $type)
return response()->json(['message' => 'Failed to generate Github App token.'], 400);
}
- $repositories = collect();
- $page = 1;
- $repositories = loadRepositoryByPage($githubApp, $token, $page);
- if ($repositories['total_count'] > 0) {
- while (count($repositories['repositories']) < $repositories['total_count']) {
- $page++;
- $repositories = loadRepositoryByPage($githubApp, $token, $page);
- }
- }
-
$gitRepository = $request->git_repository;
if (str($gitRepository)->startsWith('http') || str($gitRepository)->contains('github.com')) {
$gitRepository = str($gitRepository)->replace('https://', '')->replace('http://', '')->replace('github.com/', '');
}
- $gitRepositoryFound = collect($repositories['repositories'])->firstWhere('full_name', $gitRepository);
- if (! $gitRepositoryFound) {
- return response()->json(['message' => 'Repository not found.'], 404);
+ $gitRepository = str($gitRepository)->trim('/')->replaceEnd('.git', '')->toString();
+
+ // Use direct API call to verify repository access instead of loading all repositories
+ // This is much faster and avoids timeouts for GitHub Apps with many repositories
+ $response = Http::GitHub($githubApp->api_url, $token)
+ ->timeout(20)
+ ->retry(3, 200, throw: false)
+ ->get("/repos/{$gitRepository}");
+
+ if ($response->status() === 404 || $response->status() === 403) {
+ return response()->json(['message' => 'Repository not found or not accessible by the GitHub App.'], 404);
}
+
+ if (! $response->successful()) {
+ return response()->json(['message' => 'Failed to verify repository access: '.($response->json()['message'] ?? 'Unknown error')], 400);
+ }
+
+ $gitRepositoryFound = $response->json();
$repository_project_id = data_get($gitRepositoryFound, 'id');
$application = new Application;
diff --git a/public/svgs/alexandrie.svg b/public/svgs/alexandrie.svg
new file mode 100644
index 000000000..404fc5e2b
--- /dev/null
+++ b/public/svgs/alexandrie.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/public/svgs/langflow.svg b/public/svgs/langflow.svg
new file mode 100644
index 000000000..08bd5557d
--- /dev/null
+++ b/public/svgs/langflow.svg
@@ -0,0 +1,5 @@
+
diff --git a/svgs/goatcounter.svg b/svgs/goatcounter.svg
new file mode 100644
index 000000000..9477e0354
--- /dev/null
+++ b/svgs/goatcounter.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/templates/compose/alexandrie.yaml b/templates/compose/alexandrie.yaml
new file mode 100644
index 000000000..9d7d59227
--- /dev/null
+++ b/templates/compose/alexandrie.yaml
@@ -0,0 +1,93 @@
+# documentation: https://github.com/Smaug6739/Alexandrie/tree/main/docs
+# slogan: A powerful Markdown workspace designed for speed, clarity, and creativity.
+# category: productivity
+# tags: note-taking, markdown, knowledge-management, personal-wiki, productivity
+# logo: svgs/alexandrie.svg
+# port: 8200
+
+services:
+ frontend:
+ image: ghcr.io/smaug6739/alexandrie-frontend:v8.4.1
+ environment:
+ - SERVICE_URL_FRONTEND_8200
+ - PORT=8200
+ - NUXT_PUBLIC_CONFIG_DISABLE_SIGNUP_PAGE=${CONFIG_DISABLE_SIGNUP:-false}
+ - NUXT_PUBLIC_CONFIG_DISABLE_LANDING_PAGE=${CONFIG_DISABLE_LANDING:-false}
+ - NUXT_PUBLIC_BASE_API=${SERVICE_URL_BACKEND}
+ - NUXT_PUBLIC_BASE_CDN=${SERVICE_URL_RUSTFS}
+ - NUXT_PUBLIC_CDN_ENDPOINT=${CDN_ENDPOINT:-/alexandrie/}
+ - NUXT_PUBLIC_BASE_URL=${SERVICE_URL_FRONTEND}
+ depends_on:
+ - backend
+
+ backend:
+ image: ghcr.io/smaug6739/alexandrie-backend:v8.4.1
+ environment:
+ - SERVICE_URL_BACKEND_8201
+ - BACKEND_PORT=8201
+ - GIN_MODE=release
+ - JWT_SECRET=${SERVICE_PASSWORD_JWT}
+      - COOKIE_DOMAIN=${SERVICE_URL_FRONTEND} # NOTE(review): cookie domains must not include a scheme — SERVICE_FQDN_FRONTEND is probably intended here; verify
+ - FRONTEND_URL=${SERVICE_URL_FRONTEND}
+ - ALLOW_UNSECURE=${ALLOW_UNSECURE:-false}
+ - DATABASE_HOST=mysql
+ - DATABASE_PORT=3306
+ - DATABASE_NAME=${MYSQL_DATABASE:-alexandrie-db}
+ - DATABASE_USER=${SERVICE_USER_MYSQL}
+ - DATABASE_PASSWORD=${SERVICE_PASSWORD_MYSQL}
+ - MINIO_ENDPOINT=rustfs:9000
+ - MINIO_PUBLIC_URL=${SERVICE_URL_RUSTFS}
+ - MINIO_SECURE=${MINIO_SECURE:-false}
+ - MINIO_ACCESSKEY=${SERVICE_USER_RUSTFS}
+ - MINIO_SECRETKEY=${SERVICE_PASSWORD_RUSTFS}
+ - MINIO_BUCKET=${MINIO_BUCKET:-alexandrie}
+ - SMTP_HOST=${SMTP_HOST:-}
+ - SMTP_MAIL=${SMTP_MAIL:-}
+ - SMTP_PASSWORD=${SMTP_PASSWORD:-}
+ depends_on:
+ mysql:
+ condition: service_healthy
+ rustfs:
+ condition: service_healthy
+
+ mysql:
+ image: mysql:8.0
+ environment:
+ - MYSQL_ROOT_PASSWORD=${SERVICE_PASSWORD_MYSQLROOT}
+ - MYSQL_USER=${SERVICE_USER_MYSQL}
+ - MYSQL_PASSWORD=${SERVICE_PASSWORD_MYSQL}
+ - MYSQL_DATABASE=${MYSQL_DATABASE:-alexandrie-db}
+ volumes:
+ - mysql-data:/var/lib/mysql
+ healthcheck:
+ test:
+ - CMD
+ - mysqladmin
+ - ping
+ - "-h"
+ - localhost
+ - "-u"
+ - root
+ - "-p${SERVICE_PASSWORD_MYSQLROOT}"
+ timeout: 5s
+ interval: 10s
+ retries: 5
+
+ rustfs:
+ image: rustfs/rustfs:1.0.0-alpha.81
+ environment:
+ - SERVICE_URL_RUSTFS_9000
+ - RUSTFS_ACCESS_KEY=${SERVICE_USER_RUSTFS}
+ - RUSTFS_SECRET_KEY=${SERVICE_PASSWORD_RUSTFS}
+ - RUSTFS_CONSOLE_ENABLE=${RUSTFS_CONSOLE_ENABLE:-false}
+ - RUSTFS_LOG_LEVEL=${RUSTFS_LOG_LEVEL:-info}
+ volumes:
+ - rustfs-data:/data
+ - rustfs-logs:/logs
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "nc -z localhost 9000 || exit 1"
+ interval: 10s
+ timeout: 5s
+ retries: 5
diff --git a/templates/compose/elasticsearch-with-kibana.yaml b/templates/compose/elasticsearch-with-kibana.yaml
index 6cc08d889..2893f9875 100644
--- a/templates/compose/elasticsearch-with-kibana.yaml
+++ b/templates/compose/elasticsearch-with-kibana.yaml
@@ -7,82 +7,82 @@
services:
elasticsearch:
image: 'elastic/elasticsearch:9.1.2'
- container_name: elasticsearch
- restart: unless-stopped
environment:
- - ELASTIC_PASSWORD=${SERVICE_PASSWORD_ELASTICSEARCH}
- - 'ES_JAVA_OPTS=-Xms512m -Xmx512m'
- - discovery.type=single-node
- - bootstrap.memory_lock=true
- - xpack.security.enabled=true
- - xpack.security.http.ssl.enabled=false
- - xpack.security.transport.ssl.enabled=false
+ - ELASTIC_USER=elastic # Default built-in superuser (can't be changed); included here to avoid confusion about the username
+ - 'ELASTIC_PASSWORD=${SERVICE_PASSWORD_ELASTICSEARCH}'
+ - 'ES_JAVA_OPTS=-Xms512m -Xmx512m' # Limit JVM heap size to 512MB to prevent Elasticsearch from consuming all system memory
+ - discovery.type=single-node # Disable clustering; run as a standalone node (sufficient for most local or single-host setups)
+ - bootstrap.memory_lock=true # Prevent memory swapping by locking JVM memory (helps with performance/stability)
+ - xpack.security.http.ssl.enabled=false # SSL is unnecessary for HTTP traffic within the isolated Docker network
volumes:
- - '/etc/localtime:/etc/localtime:ro'
+ - '/etc/localtime:/etc/localtime:ro' # Sync container timezone with host
- 'elasticsearch-data:/usr/share/elasticsearch/data'
healthcheck:
test:
- CMD-SHELL
- - 'curl --user elastic:${SERVICE_PASSWORD_ELASTICSEARCH} --silent --fail http://localhost:9200/_cluster/health || exit 1'
+ - 'curl --user elastic:${SERVICE_PASSWORD_ELASTICSEARCH} --silent --fail http://localhost:9200/_cluster/health'
interval: 10s
timeout: 10s
retries: 24
-
kibana:
image: 'kibana:9.1.2'
- container_name: kibana
- restart: unless-stopped
environment:
- SERVICE_URL_KIBANA_5601
- - 'SERVER_NAME=${SERVICE_URL_KIBANA}'
- - 'SERVER_PUBLICBASEURL=${SERVICE_URL_KIBANA}'
- - 'ELASTICSEARCH_HOSTS=http://elasticsearch:9200'
- - 'ELASTICSEARCH_USERNAME=kibana_system'
- - 'ELASTICSEARCH_PASSWORD=${SERVICE_PASSWORD_KIBANA}'
- - 'XPACK_SECURITY_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKSECURITY}'
- - 'XPACK_REPORTING_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKREPORTING}'
- - 'XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKENCRYPTEDSAVEDOBJECTS}'
- - 'TELEMETRY_OPTIN=${TELEMETRY_OPTIN:-false}'
+ - 'KIBANA_PASSWORD=${SERVICE_PASSWORD_KIBANA}'
+ - 'ELASTICSEARCH_SERVICEACCOUNTTOKEN=${ELASTICSEARCH_SERVICEACCOUNTTOKEN}' # Kibana authenticates to Elasticsearch using this service token
+ - 'SERVER_NAME=${SERVICE_FQDN_KIBANA}' # For generating links and setting cookie domains
+ - 'SERVER_PUBLICBASEURL=${SERVICE_URL_KIBANA}' # Public URL used in generated links (reporting, alerting, etc.)
+ - 'ELASTICSEARCH_HOSTS=http://elasticsearch:9200' # Connect Kibana to Elasticsearch Service
+      - XPACK.SECURITY.ENABLED=true # NOTE(review): Kibana only maps underscore-style env vars (XPACK_SECURITY_ENABLED), and xpack.security.enabled was removed in Kibana 8.x — this line is likely ignored; confirm and consider removing
+ - 'XPACK_SECURITY_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKSECURITY}' # Required for encrypted session & auth tokens
+ - 'XPACK_REPORTING_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKREPORTING}' # Required for reporting (PDFs, PNGs)
+ - 'XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${SERVICE_PASSWORD_XPACKENCRYPTEDSAVEDOBJECTS}' # Required for encrypting saved objects like alerts
+ - 'TELEMETRY_OPTIN=${TELEMETRY_OPTIN:-false}' # Disable telemetry by default (opt-in only)
volumes:
- - '/etc/localtime:/etc/localtime:ro'
+ - '/etc/localtime:/etc/localtime:ro' # Sync container timezone with host
- 'kibana-data:/usr/share/kibana/data'
depends_on:
- setup:
- condition: service_completed_successfully
+ elasticsearch:
+ condition: service_healthy
healthcheck:
test:
- CMD-SHELL
- - "curl -s http://localhost:5601/api/status | grep -q '\"level\":\"available\"' || exit 1"
+ - "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'" # Expect HTTP 302 (redirect) from Kibana login page
interval: 10s
timeout: 10s
retries: 120
-
- setup:
- image: 'elastic/elasticsearch:9.1.2'
- container_name: kibana-setup
+ kibana-token-generator:
+ image: 'alpine:latest'
depends_on:
elasticsearch:
condition: service_healthy
exclude_from_hc: true
environment:
- - 'ELASTIC_PASSWORD=${SERVICE_PASSWORD_ELASTICSEARCH}'
- - 'KIBANA_PASSWORD=${SERVICE_PASSWORD_KIBANA}'
+ - 'ELASTIC_PASSWORD=${SERVICE_PASSWORD_ELASTICSEARCH}' # Needed to authenticate the ELASTICSEARCH_SERVICEACCOUNTTOKEN creation request
entrypoint:
- sh
- '-c'
- |
- echo "Setting up Kibana user password..."
-
- until curl -s -u "elastic:${ELASTIC_PASSWORD}" http://elasticsearch:9200/_cluster/health | grep -q '"status":"green\|yellow"'; do
- echo "Waiting for Elasticsearch..."
- sleep 2
- done
-
- echo "Setting password for kibana_system user..."
- curl -s -X POST -u "elastic:${ELASTIC_PASSWORD}" \
- -H "Content-Type: application/json" \
- http://elasticsearch:9200/_security/user/kibana_system/_password \
- -d "{\"password\":\"${KIBANA_PASSWORD}\"}" || exit 1
-
- echo "Kibana setup completed successfully"
- restart: 'no'
+ apk add --no-cache curl jq >/dev/null 2>&1
+ echo "Generating Kibana service token..."
+          RESPONSE=$$(curl -s -w "\n%{http_code}" -u elastic:"$${ELASTIC_PASSWORD}" -X POST "http://elasticsearch:9200/_security/service/elastic/kibana/credential/token/kibana-service-token")
+ HTTP_CODE=$$(echo "$${RESPONSE}" | tail -n1)
+          BODY=$$(echo "$${RESPONSE}" | head -n -1) # NOTE(review): BusyBox head on alpine may not support a negative -n count — verify, or use sed '$d' instead
+ if [ "$${HTTP_CODE}" = "200" ]; then
+ CREATED=$$(echo "$${BODY}" | jq -r '.created')
+ if [ "$${CREATED}" = "true" ]; then
+ TOKEN_VALUE=$$(echo "$${BODY}" | jq -r '.token.value')
+ echo "Token created successfully:"
+ echo "$${TOKEN_VALUE}"
+ else
+ echo "Unexpected response, token not created:"
+ echo "$${BODY}"
+ fi
+ elif [ "$${HTTP_CODE}" = "409" ]; then
+ echo "Token already exists. Skipping token creation."
+ else
+ echo "Failed to create token. HTTP code: $${HTTP_CODE}"
+ echo "$${BODY}"
+ exit 1
+ fi
+ restart: 'no' # Run once to generate token, then exit
diff --git a/templates/compose/formbricks.yaml b/templates/compose/formbricks.yaml
index a54455089..17d462486 100644
--- a/templates/compose/formbricks.yaml
+++ b/templates/compose/formbricks.yaml
@@ -1,13 +1,13 @@
-# documentation: https://formbricks.com/docs/self-hosting/configuration
+# documentation: https://formbricks.com/docs/self-hosting/setup/docker
# slogan: Open Source Survey Platform
# category: analytics
-# tags: form, builder, forms, survey, open source, experience, management, self-hosted, docker
+# tags: form, builder, forms, survey
# logo: svgs/formbricks.png
# port: 3000
services:
formbricks:
- image: ghcr.io/formbricks/formbricks:latest
+ image: ghcr.io/formbricks/formbricks:4.5.0 # Released on Jan 5 2026
environment:
- SERVICE_URL_FORMBRICKS_3000
- WEBAPP_URL=$SERVICE_URL_FORMBRICKS
@@ -57,15 +57,25 @@ services:
- IMPRINT_URL=${IMPRINT_URL}
- RATE_LIMITING_DISABLED=${RATE_LIMITING_DISABLED:-0}
- OPENTELEMETRY_LISTENER_URL=${OPENTELEMETRY_LISTENER_URL}
- - REDIS_URL=${REDIS_URL}
- - REDIS_HTTP_URL=${REDIS_HTTP_URL}
+ - 'REDIS_URL=redis://valkey:6379'
- DEFAULT_ORGANIZATION_ID=${DEFAULT_ORGANIZATION_ID}
- DEFAULT_ORGANIZATION_ROLE=${DEFAULT_ORGANIZATION_ROLE:-owner}
+ - S3_ACCESS_KEY=$SERVICE_USER_MINIO
+ - S3_SECRET_KEY=${SERVICE_PASSWORD_MINIO}
+ - S3_REGION=us-east-1
+ - S3_BUCKET_NAME=formbricks
+      - S3_ENDPOINT_URL=$MINIO_SERVER_URL # This has to be publicly accessible by the Formbricks frontend; using http://minio:9000 doesn't work!
+ - 'S3_FORCE_PATH_STYLE=1'
volumes:
- formbricks-uploads:/apps/web/uploads/
depends_on:
postgresql:
condition: service_healthy
+ valkey:
+ condition: service_healthy
+ minio:
+ condition: service_healthy
+
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:3000"]
interval: 2s
@@ -85,3 +95,47 @@ services:
interval: 5s
timeout: 20s
retries: 10
+
+ valkey:
+ image: valkey/valkey:8-alpine
+ command: valkey-server --appendonly yes
+ volumes:
+ - formbricks-valkey:/data
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - 'valkey-cli ping | grep PONG'
+ interval: 5s
+ timeout: 5s
+ retries: 5
+ start_period: 3s
+
+ minio:
+ image: ghcr.io/coollabsio/minio:RELEASE.2025-10-15T17-29-55Z # Released on 15 October 2025
+ command: server /data --console-address ":9001"
+ environment:
+ - MINIO_SERVER_URL=$MINIO_SERVER_URL
+ - MINIO_BROWSER_REDIRECT_URL=$MINIO_BROWSER_REDIRECT_URL
+ - MINIO_ROOT_USER=$SERVICE_USER_MINIO
+ - MINIO_ROOT_PASSWORD=$SERVICE_PASSWORD_MINIO
+ volumes:
+ - formbricks-minio-data:/data
+ healthcheck:
+ test: ["CMD", "mc", "ready", "local"]
+ interval: 5s
+ timeout: 20s
+ retries: 10
+
+ minio-init:
+ image: minio/mc:latest
+ depends_on:
+ minio:
+ condition: service_healthy
+ entrypoint: >
+ sh -c "
+ mc alias set local http://minio:9000 ${SERVICE_USER_MINIO} ${SERVICE_PASSWORD_MINIO} &&
+ mc mb -p local/formbricks || true &&
+ mc anonymous set private local/formbricks
+ "
+ restart: "no"
+ exclude_from_hc: true
diff --git a/templates/compose/goatcounter.yaml b/templates/compose/goatcounter.yaml
new file mode 100644
index 000000000..3ff1ccb5b
--- /dev/null
+++ b/templates/compose/goatcounter.yaml
@@ -0,0 +1,19 @@
+# documentation: https://www.goatcounter.com/help
+# slogan: Lightweight web analytics platform.
+# category: analytics
+# tags: analytics, insights, privacy
+# logo: svgs/goatcounter.svg
+# port: 8080
+
+services:
+ goatcounter:
+ image: arp242/goatcounter:2.7
+ environment:
+ - SERVICE_URL_GOATCOUNTER_8080
+ volumes:
+ - goatcounter-data:/home/goatcounter/goatcounter-data
+ healthcheck:
+ test: ["CMD-SHELL", "wget -qO- http://127.0.0.1:8080/status >/dev/null 2>&1 || exit 1"]
+ interval: 10s
+ timeout: 5s
+ retries: 10
diff --git a/templates/compose/kimai.yaml b/templates/compose/kimai.yaml
index e121c4eb1..c9795f0d2 100644
--- a/templates/compose/kimai.yaml
+++ b/templates/compose/kimai.yaml
@@ -37,7 +37,7 @@ services:
- ADMINMAIL=${ADMINMAIL:-admin@kimai.local}
- ADMINPASS=${SERVICE_PASSWORD_ADMINPASS}
- DATABASE_URL=mysql://${SERVICE_USER_MYSQL}:${SERVICE_PASSWORD_MYSQL}@mysql/${MYSQL_DATABASE}?charset=utf8mb4&serverVersion=8.3.0
- - TRUSTED_HOSTS=localhost
+ - 'TRUSTED_HOSTS=${TRUSTED_HOSTS}|localhost|127.0.0.1'
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:8001"]
interval: 2s
diff --git a/templates/compose/langflow.yaml b/templates/compose/langflow.yaml
new file mode 100644
index 000000000..0d2f03a63
--- /dev/null
+++ b/templates/compose/langflow.yaml
@@ -0,0 +1,43 @@
+# documentation: https://docs.langflow.org
+# slogan: Langflow is an open-source, Python-based, customizable framework for building AI applications.
+# category: ai
+# tags: langflow, ai, openai, gpt, llm, workflow, automation, open source, low code
+# logo: svgs/langflow.svg
+# port: 7860
+
+services:
+ langflow:
+ image: langflowai/langflow:1.7.2
+ environment:
+ - SERVICE_URL_LANGFLOW_7860
+ - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN:-false}
+ - LANGFLOW_SUPERUSER=$SERVICE_USER_LANGFLOW
+ - LANGFLOW_SUPERUSER_PASSWORD=$SERVICE_PASSWORD_LANGFLOW
+ - LANGFLOW_DATABASE_URL=postgresql://$SERVICE_USER_POSTGRES:$SERVICE_PASSWORD_POSTGRES@postgres:5432/${POSTGRES_DB:-langflow-db}
+      - LANGFLOW_CONFIG_DIR=app/langflow # NOTE(review): relative path resolves against the container CWD; the volume mounts /app/langflow, so the absolute /app/langflow may be intended (upstream example uses the relative form) — verify persistence
+ - LANGFLOW_HOST=0.0.0.0
+ - PORT=7860
+ healthcheck:
+ test: ["CMD-SHELL", "curl -f http://127.0.0.1:7860/health"]
+ interval: 5s
+ timeout: 20s
+ retries: 10
+ depends_on:
+ postgres:
+ condition: service_healthy
+ volumes:
+ - langflow-data:/app/langflow
+
+ postgres:
+ image: postgres:18-alpine
+ environment:
+ - POSTGRES_USER=$SERVICE_USER_POSTGRES
+ - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - POSTGRES_DB=${POSTGRES_DB:-langflow-db}
+ volumes:
+      - langflow-postgres:/var/lib/postgresql # postgres:18 images store data under /var/lib/postgresql; the previous /var/lib/postgres/data path would not persist the database
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
+ interval: 5s
+ timeout: 20s
+ retries: 10
diff --git a/templates/compose/listmonk.yaml b/templates/compose/listmonk.yaml
index a25bbdad5..fa73f6ff7 100644
--- a/templates/compose/listmonk.yaml
+++ b/templates/compose/listmonk.yaml
@@ -7,7 +7,7 @@
services:
listmonk:
- image: listmonk/listmonk:latest
+ image: listmonk/listmonk:v6.0.0
environment:
- SERVICE_URL_LISTMONK_9000
- LISTMONK_app__address=0.0.0.0:9000
@@ -18,17 +18,18 @@ services:
- LISTMONK_db__port=5432
- TZ=Etc/UTC
volumes:
- - "listmonk-data:/listmonk/uploads"
- depends_on:
- postgres:
- condition: service_healthy
+ - listmonk-data:/listmonk/uploads
healthcheck:
test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1:9000"]
interval: 5s
timeout: 20s
retries: 10
+ depends_on:
+ postgres:
+ condition: service_healthy
+
listmonk-initial-database-setup:
- image: listmonk/listmonk:latest
+ image: listmonk/listmonk:v6.0.0
command: "./listmonk --install --yes --idempotent"
restart: "no"
depends_on:
@@ -40,14 +41,15 @@ services:
- LISTMONK_db__user=$SERVICE_USER_POSTGRES
- LISTMONK_db__password=$SERVICE_PASSWORD_POSTGRES
- LISTMONK_db__port=5432
+
postgres:
- image: "postgres:latest"
+ image: postgres:18-alpine
environment:
- POSTGRES_DB=listmonk
- POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
- POSTGRES_USER=$SERVICE_USER_POSTGRES
volumes:
- - "pg-data:/var/lib/postgresql/data"
+ - postgres-data:/var/lib/postgresql
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
interval: 5s
diff --git a/templates/compose/n8n-with-postgres-and-worker.yaml b/templates/compose/n8n-with-postgres-and-worker.yaml
index 507d0f9dc..e03b38960 100644
--- a/templates/compose/n8n-with-postgres-and-worker.yaml
+++ b/templates/compose/n8n-with-postgres-and-worker.yaml
@@ -124,7 +124,7 @@ services:
task-runners:
image: n8nio/runners:2.1.5
environment:
- - N8N_RUNNERS_TASK_BROKER_URI=${N8N_RUNNERS_TASK_BROKER_URI:-http://n8n:5679}
+ - N8N_RUNNERS_TASK_BROKER_URI=${N8N_RUNNERS_TASK_BROKER_URI:-http://n8n-worker:5679}
- N8N_RUNNERS_AUTH_TOKEN=$SERVICE_PASSWORD_N8N
- N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT=${N8N_RUNNERS_AUTO_SHUTDOWN_TIMEOUT:-15}
- N8N_RUNNERS_MAX_CONCURRENCY=${N8N_RUNNERS_MAX_CONCURRENCY:-5}
diff --git a/templates/compose/openpanel.yaml b/templates/compose/openpanel.yaml
index fd7c9bd64..0d37886a8 100644
--- a/templates/compose/openpanel.yaml
+++ b/templates/compose/openpanel.yaml
@@ -7,7 +7,7 @@
services:
openpanel-dashboard:
- image: lindesvard/openpanel-dashboard:2.0.0
+ image: lindesvard/openpanel-dashboard:2
environment:
- NODE_ENV=production
- SELF_HOSTED=true
@@ -37,7 +37,7 @@ services:
start_period: 15s
openpanel-api:
- image: lindesvard/openpanel-api:2.0.0
+ image: lindesvard/openpanel-api:2
command: >
sh -c "
echo 'Running migrations...'
@@ -74,7 +74,7 @@ services:
retries: 5
openpanel-worker:
- image: lindesvard/openpanel-worker:2.0.0
+ image: lindesvard/openpanel-worker:2
environment:
- DISABLE_BULLBOARD=${DISABLE_BULLBOARD:-1}
- NODE_ENV=production
diff --git a/templates/compose/reactive-resume.yaml b/templates/compose/reactive-resume.yaml
index 0b53b0d7c..a5c5332ec 100644
--- a/templates/compose/reactive-resume.yaml
+++ b/templates/compose/reactive-resume.yaml
@@ -7,7 +7,7 @@
services:
reactive-resume:
- image: amruthpillai/reactive-resume:latest
+ image: amruthpillai/reactive-resume:v4.3.7
environment:
- SERVICE_URL_REACTIVERESUME_3000
- PUBLIC_URL=$SERVICE_URL_REACTIVERESUME
@@ -31,6 +31,16 @@ services:
- postgres
- minio
- chrome
+ healthcheck:
+ test:
+ - CMD
+ - node
+ - '-e'
+ - "require('http').get('http://127.0.0.1:3000', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"
+ interval: 10s
+ timeout: 3s
+ retries: 5
+ start_period: 10s
postgres:
image: postgres:16-alpine
environment:
@@ -69,7 +79,16 @@ services:
- TIMEOUT=10000
- CONCURRENT=10
- TOKEN=$SERVICE_PASSWORD_CHROMETOKEN
-
+ healthcheck:
+ test:
+ - CMD
+ - curl
+ - '-f'
+ - 'http://127.0.0.1:3000/'
+ interval: 10s
+ timeout: 3s
+ retries: 5
+
redis:
image: redis:7-alpine
command: redis-server
diff --git a/templates/compose/rocketchat.yaml b/templates/compose/rocketchat.yaml
index 1ffb02327..001ba85b4 100644
--- a/templates/compose/rocketchat.yaml
+++ b/templates/compose/rocketchat.yaml
@@ -7,44 +7,39 @@
services:
rocketchat:
- image: registry.rocket.chat/rocketchat/rocket.chat:latest
+ image: 'registry.rocket.chat/rocketchat/rocket.chat:8.0.1'
environment:
- SERVICE_URL_ROCKETCHAT_3000
- - MONGO_URL=mongodb://${MONGODB_ADVERTISED_HOSTNAME:-mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/${MONGODB_DATABASE:-rocketchat}?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}
- - MONGO_OPLOG_URL=mongodb://${MONGODB_ADVERTISED_HOSTNAME:-mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/local?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}
+ - 'MONGO_URL=mongodb://${MONGODB_ADVERTISED_HOSTNAME:-mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/${MONGODB_DATABASE:-rocketchat}?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}'
+ - 'MONGO_OPLOG_URL=mongodb://${MONGODB_ADVERTISED_HOSTNAME:-mongodb}:${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}/local?replicaSet=${MONGODB_REPLICA_SET_NAME:-rs0}'
- ROOT_URL=$SERVICE_URL_ROCKETCHAT
- DEPLOY_METHOD=docker
- REG_TOKEN=$REG_TOKEN
+ - 'MAIL_URL=${MAIL_URL:-test@example.com}'
depends_on:
mongodb:
condition: service_healthy
healthcheck:
test:
- [
- "CMD",
- "node",
- "--eval",
- "const http = require('http'); const options = { host: '0.0.0.0', port: 3000, timeout: 2000, path: '/health' }; const healthCheck = http.request(options, (res) => { console.log('HEALTHCHECK STATUS:', res.statusCode); if (res.statusCode == 200) { process.exit(0); } else { process.exit(1); } }); healthCheck.on('error', function (err) { console.error('ERROR'); process.exit(1); }); healthCheck.end();",
- ]
+ - CMD
+ - node
+ - '--eval'
+ - "const http = require('http'); const options = { host: '0.0.0.0', port: 3000, timeout: 2000, path: '/health' }; const healthCheck = http.request(options, (res) => { console.log('HEALTHCHECK STATUS:', res.statusCode); if (res.statusCode == 200) { process.exit(0); } else { process.exit(1); } }); healthCheck.on('error', function (err) { console.error('ERROR'); process.exit(1); }); healthCheck.end();"
interval: 2s
timeout: 10s
retries: 15
-
mongodb:
- image: docker.io/bitnamilegacy/mongodb:5.0
+ image: 'mongo:7'
volumes:
- - mongodb_data:/bitnami/mongodb
- environment:
- - MONGODB_REPLICA_SET_MODE=primary
- - MONGODB_REPLICA_SET_NAME=${MONGODB_REPLICA_SET_NAME:-rs0}
- - MONGODB_PORT_NUMBER=${MONGODB_PORT_NUMBER:-27017}
- - MONGODB_INITIAL_PRIMARY_HOST=${MONGODB_INITIAL_PRIMARY_HOST:-mongodb}
- - MONGODB_INITIAL_PRIMARY_PORT_NUMBER=${MONGODB_INITIAL_PRIMARY_PORT_NUMBER:-27017}
- - MONGODB_ADVERTISED_HOSTNAME=${MONGODB_ADVERTISED_HOSTNAME:-mongodb}
- - MONGODB_ENABLE_JOURNAL=${MONGODB_ENABLE_JOURNAL:-true}
- - ALLOW_EMPTY_PASSWORD=${ALLOW_EMPTY_PASSWORD:-yes}
+ - 'mongodb_data:/data/db'
+ command: "sh -c \"\n mongod --replSet ${MONGODB_REPLICA_SET_NAME:-rs0} --bind_ip_all &\n sleep 5 &&\n mongosh --eval 'rs.initiate({_id:\\\"${MONGODB_REPLICA_SET_NAME:-rs0}\\\", members:[{_id:0, host:\\\"mongodb:27017\\\"}]})' ||\n true &&\n wait\n\"\n"
healthcheck:
- test: echo 'db.stats().ok' | mongo localhost:27017/test --quiet
+ test:
+ - CMD
+ - mongosh
+ - '--quiet'
+ - '--eval'
+ - "db.adminCommand('ping')"
interval: 2s
timeout: 10s
retries: 15
diff --git a/templates/compose/supabase.yaml b/templates/compose/supabase.yaml
index 81a6f5fa3..fad059a08 100644
--- a/templates/compose/supabase.yaml
+++ b/templates/compose/supabase.yaml
@@ -29,6 +29,11 @@ services:
- SUPABASE_SERVICE_KEY=${SERVICE_SUPABASESERVICE_KEY}
- DASHBOARD_USERNAME=${SERVICE_USER_ADMIN}
- DASHBOARD_PASSWORD=${SERVICE_PASSWORD_ADMIN}
+ - 'KONG_STORAGE_CONNECT_TIMEOUT=${KONG_STORAGE_CONNECT_TIMEOUT:-60}'
+ - 'KONG_STORAGE_WRITE_TIMEOUT=${KONG_STORAGE_WRITE_TIMEOUT:-3600}'
+ - 'KONG_STORAGE_READ_TIMEOUT=${KONG_STORAGE_READ_TIMEOUT:-3600}'
+ - 'KONG_STORAGE_REQUEST_BUFFERING=${KONG_STORAGE_REQUEST_BUFFERING:-false}'
+ - 'KONG_STORAGE_RESPONSE_BUFFERING=${KONG_STORAGE_RESPONSE_BUFFERING:-false}'
volumes:
# https://github.com/supabase/supabase/issues/12661
- type: bind
@@ -215,12 +220,17 @@ services:
## Storage routes: the storage server manages its own auth
- name: storage-v1
_comment: 'Storage: /storage/v1/* -> http://supabase-storage:5000/*'
+ connect_timeout: $KONG_STORAGE_CONNECT_TIMEOUT
+ write_timeout: $KONG_STORAGE_WRITE_TIMEOUT
+ read_timeout: $KONG_STORAGE_READ_TIMEOUT
url: http://supabase-storage:5000/
routes:
- name: storage-v1-all
strip_path: true
paths:
- /storage/v1/
+ request_buffering: $KONG_STORAGE_REQUEST_BUFFERING
+ response_buffering: $KONG_STORAGE_RESPONSE_BUFFERING
plugins:
- name: cors