From fbbb4f46f377f316eaf58fabb80bca1d9a33401d Mon Sep 17 00:00:00 2001 From: Mauricio Siu <47042324+Siumauricio@users.noreply.github.com> Date: Sun, 9 Mar 2025 19:05:57 -0600 Subject: [PATCH 01/36] feat: Add numerous new blueprint templates for various applications --- blueprints/activepieces/docker-compose.yml | 64 + blueprints/activepieces/index.ts | 44 + blueprints/actualbudget/docker-compose.yml | 12 + blueprints/actualbudget/index.ts | 20 + blueprints/alist/docker-compose.yml | 14 + blueprints/alist/index.ts | 22 + blueprints/answer/docker-compose.yml | 30 + blueprints/answer/index.ts | 33 + blueprints/appsmith/docker-compose.yml | 6 + blueprints/appsmith/index.ts | 23 + blueprints/appwrite/docker-compose.yml | 887 ++++++++ blueprints/appwrite/index.ts | 153 ++ blueprints/aptabase/docker-compose.yml | 49 + blueprints/aptabase/index.ts | 27 + blueprints/baserow/docker-compose.yml | 10 + blueprints/baserow/index.ts | 24 + blueprints/blender/docker-compose.yml | 26 + blueprints/blender/index.ts | 34 + blueprints/browserless/docker-compose.yml | 16 + blueprints/browserless/index.ts | 28 + blueprints/budibase/docker-compose.yml | 192 ++ blueprints/budibase/index.ts | 45 + blueprints/calcom/docker-compose.yml | 25 + blueprints/calcom/index.ts | 32 + blueprints/chatwoot/docker-compose.yml | 74 + blueprints/chatwoot/index.ts | 46 + blueprints/checkmate/docker-compose.yml | 42 + blueprints/checkmate/index.ts | 25 + blueprints/cloudflared/docker-compose.yml | 18 + blueprints/cloudflared/index.ts | 9 + blueprints/coder/docker-compose.yml | 37 + blueprints/coder/index.ts | 30 + blueprints/conduit/docker-compose.yml | 31 + blueprints/conduit/index.ts | 28 + blueprints/conduwuit/docker-compose.yml | 48 + blueprints/conduwuit/index.ts | 30 + blueprints/convex/docker-compose.yml | 37 + blueprints/convex/index.ts | 38 + blueprints/couchdb/docker-compose.yml | 17 + blueprints/couchdb/index.ts | 28 + blueprints/datalens/docker-compose.yml | 96 + blueprints/datalens/index.ts | 23 + blueprints/directus/docker-compose.yml | 64 + blueprints/directus/index.ts | 31 + blueprints/discord-tickets/docker-compose.yml | 54 + blueprints/discord-tickets/index.ts | 45 + blueprints/discourse/docker-compose.yml | 90 + blueprints/discourse/index.ts | 37 + blueprints/docmost/docker-compose.yml | 44 + blueprints/docmost/index.ts | 29 + blueprints/documenso/docker-compose.yml | 42 + blueprints/documenso/index.ts | 36 + blueprints/doublezero/docker-compose.yml | 19 + blueprints/doublezero/index.ts | 36 + blueprints/drawio/docker-compose.yml | 59 + blueprints/drawio/index.ts | 31 + blueprints/elastic-search/docker-compose.yml | 34 + blueprints/elastic-search/index.ts | 28 + blueprints/erpnext/docker-compose.yml | 354 ++++ blueprints/erpnext/index.ts | 39 + blueprints/evolutionapi/docker-compose.yml | 58 + blueprints/evolutionapi/index.ts | 59 + blueprints/excalidraw/docker-compose.yml | 6 + blueprints/excalidraw/index.ts | 22 + blueprints/filebrowser/docker-compose.yml | 14 + blueprints/filebrowser/index.ts | 24 + blueprints/formbricks/docker-compose.yml | 37 + blueprints/formbricks/index.ts | 38 + blueprints/frappe-hr/docker-compose.yml | 354 ++++ blueprints/frappe-hr/index.ts | 39 + blueprints/ghost/docker-compose.yml | 28 + blueprints/ghost/index.ts | 24 + blueprints/gitea/docker-compose.yml | 35 + blueprints/gitea/index.ts | 24 + blueprints/glance/docker-compose.yml | 11 + blueprints/glance/index.ts | 108 + blueprints/glitchtip/docker-compose.yml | 55 + blueprints/glitchtip/index.ts | 30 + 
blueprints/glpi/docker-compose.yml | 26 + blueprints/glpi/index.ts | 20 + blueprints/gotenberg/docker-compose.yml | 18 + blueprints/gotenberg/index.ts | 29 + blueprints/grafana/docker-compose.yml | 9 + blueprints/grafana/index.ts | 19 + blueprints/heyform/docker-compose.yml | 48 + blueprints/heyform/index.ts | 32 + blueprints/hi-events/docker-compose.yml | 44 + blueprints/hi-events/index.ts | 41 + blueprints/hoarder/docker-compose.yml | 45 + blueprints/hoarder/index.ts | 34 + blueprints/homarr/docker-compose.yml | 11 + blueprints/homarr/index.ts | 27 + blueprints/huly/docker-compose.yml | 172 ++ blueprints/huly/index.ts | 152 ++ blueprints/immich/docker-compose.yml | 107 + blueprints/immich/index.ts | 46 + blueprints/infisical/docker-compose.yml | 83 + blueprints/infisical/index.ts | 93 + blueprints/influxdb/docker-compose.yml | 11 + blueprints/influxdb/index.ts | 19 + blueprints/invoiceshelf/docker-compose.yml | 57 + blueprints/invoiceshelf/index.ts | 34 + blueprints/it-tools/docker-compose.yml | 8 + blueprints/it-tools/index.ts | 20 + blueprints/jellyfin/docker-compose.yml | 19 + blueprints/jellyfin/index.ts | 25 + blueprints/kimai/docker-compose.yml | 49 + blueprints/kimai/index.ts | 37 + blueprints/langflow/docker-compose.yml | 31 + blueprints/langflow/index.ts | 28 + blueprints/linkwarden/docker-compose.yml | 40 + blueprints/linkwarden/index.ts | 33 + blueprints/listmonk/docker-compose.yml | 49 + blueprints/listmonk/index.ts | 52 + blueprints/lobe-chat/docker-compose.yml | 12 + blueprints/lobe-chat/index.ts | 22 + blueprints/logto/docker-compose.yml | 40 + blueprints/logto/index.ts | 37 + blueprints/macos/docker-compose.yml | 16 + blueprints/macos/index.ts | 33 + blueprints/mailpit/docker-compose.yml | 25 + blueprints/mailpit/index.ts | 31 + blueprints/maybe/docker-compose.yml | 36 + blueprints/maybe/index.ts | 43 + blueprints/meilisearch/docker-compose.yml | 14 + blueprints/meilisearch/index.ts | 26 + blueprints/metabase/docker-compose.yml | 25 + blueprints/metabase/index.ts | 22 + blueprints/minio/docker-compose.yml | 13 + blueprints/minio/index.ts | 28 + blueprints/n8n/docker-compose.yml | 18 + blueprints/n8n/index.ts | 28 + blueprints/nextcloud-aio/docker-compose.yml | 36 + blueprints/nextcloud-aio/index.ts | 28 + blueprints/nocodb/docker-compose.yml | 31 + blueprints/nocodb/index.ts | 28 + blueprints/odoo/docker-compose.yml | 28 + blueprints/odoo/index.ts | 22 + blueprints/onedev/docker-compose.yml | 12 + blueprints/onedev/index.ts | 22 + blueprints/ontime/docker-compose.yml | 14 + blueprints/ontime/index.ts | 25 + blueprints/open-webui/docker-compose.yml | 25 + blueprints/open-webui/index.ts | 24 + blueprints/outline/docker-compose.yml | 57 + blueprints/outline/index.ts | 90 + blueprints/penpot/docker-compose.yml | 207 ++ blueprints/penpot/index.ts | 25 + blueprints/peppermint/docker-compose.yml | 38 + blueprints/peppermint/index.ts | 40 + blueprints/photoprism/docker-compose.yml | 76 + blueprints/photoprism/index.ts | 30 + blueprints/phpmyadmin/docker-compose.yml | 27 + blueprints/phpmyadmin/index.ts | 32 + blueprints/pocket-id/docker-compose.yml | 21 + blueprints/pocket-id/index.ts | 29 + blueprints/portainer/docker-compose.yml | 29 + blueprints/portainer/index.ts | 19 + blueprints/postiz/docker-compose.yml | 65 + blueprints/postiz/index.ts | 37 + blueprints/registry/docker-compose.yml | 19 + blueprints/registry/index.ts | 35 + blueprints/rocketchat/docker-compose.yml | 34 + blueprints/rocketchat/index.ts | 25 + blueprints/roundcube/docker-compose.yml | 16 + 
blueprints/roundcube/index.ts | 24 + blueprints/ryot/docker-compose.yml | 37 + blueprints/ryot/index.ts | 34 + blueprints/shlink/docker-compose.yml | 29 + blueprints/shlink/index.ts | 35 + blueprints/slash/docker-compose.yml | 39 + blueprints/slash/index.ts | 33 + blueprints/soketi/docker-compose.yml | 11 + blueprints/soketi/index.ts | 28 + blueprints/spacedrive/docker-compose.yml | 9 + blueprints/spacedrive/index.ts | 28 + blueprints/stirling/docker-compose.yml | 22 + blueprints/stirling/index.ts | 22 + blueprints/supabase/docker-compose.yml | 448 ++++ blueprints/supabase/index.ts | 995 +++++++++ blueprints/superset/docker-compose.yml | 87 + blueprints/superset/index.ts | 77 + blueprints/teable/docker-compose.yml | 67 + blueprints/teable/index.ts | 54 + blueprints/tolgee/docker-compose.yml | 24 + blueprints/tolgee/index.ts | 41 + blueprints/triggerdotdev/docker-compose.yml | 107 + blueprints/triggerdotdev/index.ts | 92 + blueprints/trilium/docker-compose.yml | 14 + blueprints/trilium/index.ts | 22 + blueprints/twenty/docker-compose.yml | 99 + blueprints/twenty/index.ts | 37 + blueprints/typebot/docker-compose.yml | 48 + blueprints/typebot/index.ts | 44 + blueprints/unifi/docker-compose.yml | 46 + blueprints/unifi/index.ts | 27 + blueprints/unsend/docker-compose.yml | 73 + blueprints/unsend/index.ts | 43 + blueprints/uptime-kuma/docker-compose.yml | 10 + blueprints/uptime-kuma/index.ts | 22 + blueprints/utils/index.ts | 85 + blueprints/vaultwarden/docker-compose.yml | 14 + blueprints/vaultwarden/index.ts | 28 + blueprints/wikijs/docker-compose.yml | 31 + blueprints/wikijs/index.ts | 35 + blueprints/windmill/docker-compose.yml | 105 + blueprints/windmill/index.ts | 43 + blueprints/windows/docker-compose.yml | 17 + blueprints/windows/index.ts | 39 + blueprints/wordpress/docker-compose.yml | 25 + blueprints/wordpress/index.ts | 22 + blueprints/yourls/docker-compose.yml | 41 + blueprints/yourls/index.ts | 35 + blueprints/zipline/docker-compose.yml | 38 + blueprints/zipline/index.ts | 32 + meta.json | 1841 ++++++++++++++++- 216 files changed, 13005 insertions(+), 48 deletions(-) create mode 100644 blueprints/activepieces/docker-compose.yml create mode 100644 blueprints/activepieces/index.ts create mode 100644 blueprints/actualbudget/docker-compose.yml create mode 100644 blueprints/actualbudget/index.ts create mode 100644 blueprints/alist/docker-compose.yml create mode 100644 blueprints/alist/index.ts create mode 100644 blueprints/answer/docker-compose.yml create mode 100644 blueprints/answer/index.ts create mode 100644 blueprints/appsmith/docker-compose.yml create mode 100644 blueprints/appsmith/index.ts create mode 100644 blueprints/appwrite/docker-compose.yml create mode 100644 blueprints/appwrite/index.ts create mode 100644 blueprints/aptabase/docker-compose.yml create mode 100644 blueprints/aptabase/index.ts create mode 100644 blueprints/baserow/docker-compose.yml create mode 100644 blueprints/baserow/index.ts create mode 100644 blueprints/blender/docker-compose.yml create mode 100644 blueprints/blender/index.ts create mode 100644 blueprints/browserless/docker-compose.yml create mode 100644 blueprints/browserless/index.ts create mode 100644 blueprints/budibase/docker-compose.yml create mode 100644 blueprints/budibase/index.ts create mode 100644 blueprints/calcom/docker-compose.yml create mode 100644 blueprints/calcom/index.ts create mode 100644 blueprints/chatwoot/docker-compose.yml create mode 100644 blueprints/chatwoot/index.ts create mode 100644 blueprints/checkmate/docker-compose.yml 
create mode 100644 blueprints/checkmate/index.ts create mode 100644 blueprints/cloudflared/docker-compose.yml create mode 100644 blueprints/cloudflared/index.ts create mode 100644 blueprints/coder/docker-compose.yml create mode 100644 blueprints/coder/index.ts create mode 100644 blueprints/conduit/docker-compose.yml create mode 100644 blueprints/conduit/index.ts create mode 100644 blueprints/conduwuit/docker-compose.yml create mode 100644 blueprints/conduwuit/index.ts create mode 100644 blueprints/convex/docker-compose.yml create mode 100644 blueprints/convex/index.ts create mode 100644 blueprints/couchdb/docker-compose.yml create mode 100644 blueprints/couchdb/index.ts create mode 100644 blueprints/datalens/docker-compose.yml create mode 100644 blueprints/datalens/index.ts create mode 100644 blueprints/directus/docker-compose.yml create mode 100644 blueprints/directus/index.ts create mode 100644 blueprints/discord-tickets/docker-compose.yml create mode 100644 blueprints/discord-tickets/index.ts create mode 100644 blueprints/discourse/docker-compose.yml create mode 100644 blueprints/discourse/index.ts create mode 100644 blueprints/docmost/docker-compose.yml create mode 100644 blueprints/docmost/index.ts create mode 100644 blueprints/documenso/docker-compose.yml create mode 100644 blueprints/documenso/index.ts create mode 100644 blueprints/doublezero/docker-compose.yml create mode 100644 blueprints/doublezero/index.ts create mode 100644 blueprints/drawio/docker-compose.yml create mode 100644 blueprints/drawio/index.ts create mode 100644 blueprints/elastic-search/docker-compose.yml create mode 100644 blueprints/elastic-search/index.ts create mode 100644 blueprints/erpnext/docker-compose.yml create mode 100644 blueprints/erpnext/index.ts create mode 100644 blueprints/evolutionapi/docker-compose.yml create mode 100644 blueprints/evolutionapi/index.ts create mode 100644 blueprints/excalidraw/docker-compose.yml create mode 100644 blueprints/excalidraw/index.ts create mode 100644 blueprints/filebrowser/docker-compose.yml create mode 100644 blueprints/filebrowser/index.ts create mode 100644 blueprints/formbricks/docker-compose.yml create mode 100644 blueprints/formbricks/index.ts create mode 100644 blueprints/frappe-hr/docker-compose.yml create mode 100644 blueprints/frappe-hr/index.ts create mode 100644 blueprints/ghost/docker-compose.yml create mode 100644 blueprints/ghost/index.ts create mode 100644 blueprints/gitea/docker-compose.yml create mode 100644 blueprints/gitea/index.ts create mode 100644 blueprints/glance/docker-compose.yml create mode 100644 blueprints/glance/index.ts create mode 100644 blueprints/glitchtip/docker-compose.yml create mode 100644 blueprints/glitchtip/index.ts create mode 100644 blueprints/glpi/docker-compose.yml create mode 100644 blueprints/glpi/index.ts create mode 100644 blueprints/gotenberg/docker-compose.yml create mode 100644 blueprints/gotenberg/index.ts create mode 100644 blueprints/grafana/docker-compose.yml create mode 100644 blueprints/grafana/index.ts create mode 100644 blueprints/heyform/docker-compose.yml create mode 100644 blueprints/heyform/index.ts create mode 100644 blueprints/hi-events/docker-compose.yml create mode 100644 blueprints/hi-events/index.ts create mode 100644 blueprints/hoarder/docker-compose.yml create mode 100644 blueprints/hoarder/index.ts create mode 100644 blueprints/homarr/docker-compose.yml create mode 100644 blueprints/homarr/index.ts create mode 100644 blueprints/huly/docker-compose.yml create mode 100644 blueprints/huly/index.ts 
create mode 100644 blueprints/immich/docker-compose.yml create mode 100644 blueprints/immich/index.ts create mode 100644 blueprints/infisical/docker-compose.yml create mode 100644 blueprints/infisical/index.ts create mode 100644 blueprints/influxdb/docker-compose.yml create mode 100644 blueprints/influxdb/index.ts create mode 100644 blueprints/invoiceshelf/docker-compose.yml create mode 100644 blueprints/invoiceshelf/index.ts create mode 100644 blueprints/it-tools/docker-compose.yml create mode 100644 blueprints/it-tools/index.ts create mode 100644 blueprints/jellyfin/docker-compose.yml create mode 100644 blueprints/jellyfin/index.ts create mode 100644 blueprints/kimai/docker-compose.yml create mode 100644 blueprints/kimai/index.ts create mode 100644 blueprints/langflow/docker-compose.yml create mode 100644 blueprints/langflow/index.ts create mode 100644 blueprints/linkwarden/docker-compose.yml create mode 100644 blueprints/linkwarden/index.ts create mode 100644 blueprints/listmonk/docker-compose.yml create mode 100644 blueprints/listmonk/index.ts create mode 100644 blueprints/lobe-chat/docker-compose.yml create mode 100644 blueprints/lobe-chat/index.ts create mode 100644 blueprints/logto/docker-compose.yml create mode 100644 blueprints/logto/index.ts create mode 100644 blueprints/macos/docker-compose.yml create mode 100644 blueprints/macos/index.ts create mode 100644 blueprints/mailpit/docker-compose.yml create mode 100644 blueprints/mailpit/index.ts create mode 100644 blueprints/maybe/docker-compose.yml create mode 100644 blueprints/maybe/index.ts create mode 100644 blueprints/meilisearch/docker-compose.yml create mode 100644 blueprints/meilisearch/index.ts create mode 100644 blueprints/metabase/docker-compose.yml create mode 100644 blueprints/metabase/index.ts create mode 100644 blueprints/minio/docker-compose.yml create mode 100644 blueprints/minio/index.ts create mode 100644 blueprints/n8n/docker-compose.yml create mode 100644 blueprints/n8n/index.ts create mode 100644 blueprints/nextcloud-aio/docker-compose.yml create mode 100644 blueprints/nextcloud-aio/index.ts create mode 100644 blueprints/nocodb/docker-compose.yml create mode 100644 blueprints/nocodb/index.ts create mode 100644 blueprints/odoo/docker-compose.yml create mode 100644 blueprints/odoo/index.ts create mode 100644 blueprints/onedev/docker-compose.yml create mode 100644 blueprints/onedev/index.ts create mode 100644 blueprints/ontime/docker-compose.yml create mode 100644 blueprints/ontime/index.ts create mode 100644 blueprints/open-webui/docker-compose.yml create mode 100644 blueprints/open-webui/index.ts create mode 100644 blueprints/outline/docker-compose.yml create mode 100644 blueprints/outline/index.ts create mode 100644 blueprints/penpot/docker-compose.yml create mode 100644 blueprints/penpot/index.ts create mode 100644 blueprints/peppermint/docker-compose.yml create mode 100644 blueprints/peppermint/index.ts create mode 100644 blueprints/photoprism/docker-compose.yml create mode 100644 blueprints/photoprism/index.ts create mode 100644 blueprints/phpmyadmin/docker-compose.yml create mode 100644 blueprints/phpmyadmin/index.ts create mode 100644 blueprints/pocket-id/docker-compose.yml create mode 100644 blueprints/pocket-id/index.ts create mode 100644 blueprints/portainer/docker-compose.yml create mode 100644 blueprints/portainer/index.ts create mode 100644 blueprints/postiz/docker-compose.yml create mode 100644 blueprints/postiz/index.ts create mode 100644 blueprints/registry/docker-compose.yml create mode 100644 
blueprints/registry/index.ts create mode 100644 blueprints/rocketchat/docker-compose.yml create mode 100644 blueprints/rocketchat/index.ts create mode 100644 blueprints/roundcube/docker-compose.yml create mode 100644 blueprints/roundcube/index.ts create mode 100644 blueprints/ryot/docker-compose.yml create mode 100644 blueprints/ryot/index.ts create mode 100644 blueprints/shlink/docker-compose.yml create mode 100644 blueprints/shlink/index.ts create mode 100644 blueprints/slash/docker-compose.yml create mode 100644 blueprints/slash/index.ts create mode 100644 blueprints/soketi/docker-compose.yml create mode 100644 blueprints/soketi/index.ts create mode 100644 blueprints/spacedrive/docker-compose.yml create mode 100644 blueprints/spacedrive/index.ts create mode 100644 blueprints/stirling/docker-compose.yml create mode 100644 blueprints/stirling/index.ts create mode 100644 blueprints/supabase/docker-compose.yml create mode 100644 blueprints/supabase/index.ts create mode 100644 blueprints/superset/docker-compose.yml create mode 100644 blueprints/superset/index.ts create mode 100644 blueprints/teable/docker-compose.yml create mode 100644 blueprints/teable/index.ts create mode 100644 blueprints/tolgee/docker-compose.yml create mode 100644 blueprints/tolgee/index.ts create mode 100644 blueprints/triggerdotdev/docker-compose.yml create mode 100644 blueprints/triggerdotdev/index.ts create mode 100644 blueprints/trilium/docker-compose.yml create mode 100644 blueprints/trilium/index.ts create mode 100644 blueprints/twenty/docker-compose.yml create mode 100644 blueprints/twenty/index.ts create mode 100644 blueprints/typebot/docker-compose.yml create mode 100644 blueprints/typebot/index.ts create mode 100644 blueprints/unifi/docker-compose.yml create mode 100644 blueprints/unifi/index.ts create mode 100644 blueprints/unsend/docker-compose.yml create mode 100644 blueprints/unsend/index.ts create mode 100644 blueprints/uptime-kuma/docker-compose.yml create mode 100644 blueprints/uptime-kuma/index.ts create mode 100644 blueprints/utils/index.ts create mode 100644 blueprints/vaultwarden/docker-compose.yml create mode 100644 blueprints/vaultwarden/index.ts create mode 100644 blueprints/wikijs/docker-compose.yml create mode 100644 blueprints/wikijs/index.ts create mode 100644 blueprints/windmill/docker-compose.yml create mode 100644 blueprints/windmill/index.ts create mode 100644 blueprints/windows/docker-compose.yml create mode 100644 blueprints/windows/index.ts create mode 100644 blueprints/wordpress/docker-compose.yml create mode 100644 blueprints/wordpress/index.ts create mode 100644 blueprints/yourls/docker-compose.yml create mode 100644 blueprints/yourls/index.ts create mode 100644 blueprints/zipline/docker-compose.yml create mode 100644 blueprints/zipline/index.ts diff --git a/blueprints/activepieces/docker-compose.yml b/blueprints/activepieces/docker-compose.yml new file mode 100644 index 000000000..a5511e7fa --- /dev/null +++ b/blueprints/activepieces/docker-compose.yml @@ -0,0 +1,64 @@ +version: "3.8" + +services: + activepieces: + image: activepieces/activepieces:0.35.0 + restart: unless-stopped + + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + environment: + AP_ENGINE_EXECUTABLE_PATH: dist/packages/engine/main.js + AP_API_KEY: ${AP_API_KEY} + AP_ENCRYPTION_KEY: ${AP_ENCRYPTION_KEY} + AP_JWT_SECRET: ${AP_JWT_SECRET} + AP_ENVIRONMENT: prod + AP_FRONTEND_URL: https://${AP_HOST} + AP_WEBHOOK_TIMEOUT_SECONDS: 30 + AP_TRIGGER_DEFAULT_POLL_INTERVAL: 5 + 
AP_POSTGRES_DATABASE: activepieces + AP_POSTGRES_HOST: postgres + AP_POSTGRES_PORT: 5432 + AP_POSTGRES_USERNAME: activepieces + AP_POSTGRES_PASSWORD: ${AP_POSTGRES_PASSWORD} + AP_EXECUTION_MODE: UNSANDBOXED + AP_REDIS_HOST: redis + AP_REDIS_PORT: 6379 + AP_SANDBOX_RUN_TIME_SECONDS: 600 + AP_TELEMETRY_ENABLED: "false" + AP_TEMPLATES_SOURCE_URL: https://cloud.activepieces.com/api/v1/flow-templates + + postgres: + image: postgres:14 + restart: unless-stopped + + environment: + POSTGRES_DB: activepieces + POSTGRES_PASSWORD: ${AP_POSTGRES_PASSWORD} + POSTGRES_USER: activepieces + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U activepieces -d activepieces"] + interval: 30s + timeout: 30s + retries: 3 + + redis: + image: redis:7 + restart: unless-stopped + + volumes: + - redis_data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 30s + timeout: 30s + retries: 3 + +volumes: + postgres_data: + redis_data: \ No newline at end of file diff --git a/blueprints/activepieces/index.ts b/blueprints/activepieces/index.ts new file mode 100644 index 000000000..f1d97ccc3 --- /dev/null +++ b/blueprints/activepieces/index.ts @@ -0,0 +1,44 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const apiKey = Array.from({ length: 32 }, () => + Math.floor(Math.random() * 16).toString(16), + ).join(""); + const encryptionKey = Array.from({ length: 32 }, () => + Math.floor(Math.random() * 16).toString(16), + ).join(""); + const jwtSecret = Array.from({ length: 32 }, () => + Math.floor(Math.random() * 16).toString(16), + ).join(""); + const postgresPassword = Array.from({ length: 32 }, () => + Math.floor(Math.random() * 16).toString(16), + ).join(""); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "activepieces", + }, + ]; + + const envs = [ + `AP_HOST=${mainDomain}`, + `AP_API_KEY=${apiKey}`, + `AP_ENCRYPTION_KEY=${encryptionKey}`, + `AP_JWT_SECRET=${jwtSecret}`, + `AP_POSTGRES_PASSWORD=${postgresPassword}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/actualbudget/docker-compose.yml b/blueprints/actualbudget/docker-compose.yml new file mode 100644 index 000000000..388a9f0b6 --- /dev/null +++ b/blueprints/actualbudget/docker-compose.yml @@ -0,0 +1,12 @@ +services: + actualbudget: + image: docker.io/actualbudget/actual-server:latest + environment: + # See all options at https://actualbudget.org/docs/config + - ACTUAL_PORT=5006 + volumes: + - actual-data:/data + restart: unless-stopped + +volumes: + actual-data: diff --git a/blueprints/actualbudget/index.ts b/blueprints/actualbudget/index.ts new file mode 100644 index 000000000..33b6fea1c --- /dev/null +++ b/blueprints/actualbudget/index.ts @@ -0,0 +1,20 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 5006, + serviceName: "actualbudget", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/alist/docker-compose.yml b/blueprints/alist/docker-compose.yml new file mode 100644 index 000000000..9ff67c943 --- /dev/null +++ b/blueprints/alist/docker-compose.yml @@ -0,0 +1,14 @@ +version: '3.3' +services: + alist: + image: xhofe/alist:v3.41.0 + volumes: + - 
alist-data:/opt/alist/data + environment: + - PUID=0 + - PGID=0 + - UMASK=022 + restart: unless-stopped + +volumes: + alist-data: \ No newline at end of file diff --git a/blueprints/alist/index.ts b/blueprints/alist/index.ts new file mode 100644 index 000000000..2a27f5708 --- /dev/null +++ b/blueprints/alist/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 5244, + serviceName: "alist", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/answer/docker-compose.yml b/blueprints/answer/docker-compose.yml new file mode 100644 index 000000000..2b9fc3440 --- /dev/null +++ b/blueprints/answer/docker-compose.yml @@ -0,0 +1,30 @@ +services: + answer: + image: apache/answer:1.4.1 + ports: + - '80' + restart: on-failure + volumes: + - answer-data:/data + depends_on: + db: + condition: service_healthy + db: + image: postgres:16 + restart: always + healthcheck: + test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"] + interval: 5s + timeout: 5s + retries: 5 + + volumes: + - db-data:/var/lib/postgresql/data + environment: + POSTGRES_DB: answer + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + +volumes: + answer-data: + db-data: diff --git a/blueprints/answer/index.ts b/blueprints/answer/index.ts new file mode 100644 index 000000000..36d48cb36 --- /dev/null +++ b/blueprints/answer/index.ts @@ -0,0 +1,33 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateHash, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainServiceHash = generateHash(schema.projectName); + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "answer", + }, + ]; + + const envs = [ + `ANSWER_HOST=http://${mainDomain}`, + `SERVICE_HASH=${mainServiceHash}`, + ]; + + const mounts: Template["mounts"] = []; + + return { + envs, + mounts, + domains, + }; +} diff --git a/blueprints/appsmith/docker-compose.yml b/blueprints/appsmith/docker-compose.yml new file mode 100644 index 000000000..f520ee362 --- /dev/null +++ b/blueprints/appsmith/docker-compose.yml @@ -0,0 +1,6 @@ +version: "3.8" +services: + appsmith: + image: index.docker.io/appsmith/appsmith-ee:v1.29 + volumes: + - ../files/stacks:/appsmith-stacks diff --git a/blueprints/appsmith/index.ts b/blueprints/appsmith/index.ts new file mode 100644 index 000000000..73279e91b --- /dev/null +++ b/blueprints/appsmith/index.ts @@ -0,0 +1,23 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateHash, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const _mainServiceHash = generateHash(schema.projectName); + + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 80, + serviceName: "appsmith", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/appwrite/docker-compose.yml b/blueprints/appwrite/docker-compose.yml new file mode 100644 index 000000000..163cb3d03 --- /dev/null +++ b/blueprints/appwrite/docker-compose.yml @@ -0,0 +1,887 @@ +version: "3.8" + +x-logging: &x-logging + logging: + driver: "json-file" + options: + max-file: "5" + max-size: "10m" + +services: + appwrite: + image: appwrite/appwrite:1.6.0 +
container_name: appwrite + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + labels: + - traefik.enable=true + - traefik.constraint-label-stack=appwrite + volumes: + - appwrite-uploads:/storage/uploads:rw + - appwrite-cache:/storage/cache:rw + - appwrite-config:/storage/config:rw + - appwrite-certificates:/storage/certificates:rw + - appwrite-functions:/storage/functions:rw + depends_on: + - mariadb + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_LOCALE + - _APP_CONSOLE_WHITELIST_ROOT + - _APP_CONSOLE_WHITELIST_EMAILS + - _APP_CONSOLE_SESSION_ALERTS + - _APP_CONSOLE_WHITELIST_IPS + - _APP_CONSOLE_HOSTNAMES + - _APP_SYSTEM_EMAIL_NAME + - _APP_SYSTEM_EMAIL_ADDRESS + - _APP_EMAIL_SECURITY + - _APP_SYSTEM_RESPONSE_FORMAT + - _APP_OPTIONS_ABUSE + - _APP_OPTIONS_ROUTER_PROTECTION + - _APP_OPTIONS_FORCE_HTTPS + - _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS + - _APP_OPENSSL_KEY_V1 + - _APP_DOMAIN + - _APP_DOMAIN_TARGET + - _APP_DOMAIN_FUNCTIONS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_SMTP_HOST + - _APP_SMTP_PORT + - _APP_SMTP_SECURE + - _APP_SMTP_USERNAME + - _APP_SMTP_PASSWORD + - _APP_USAGE_STATS + - _APP_STORAGE_LIMIT + - _APP_STORAGE_PREVIEW_LIMIT + - _APP_STORAGE_ANTIVIRUS + - _APP_STORAGE_ANTIVIRUS_HOST + - _APP_STORAGE_ANTIVIRUS_PORT + - _APP_STORAGE_DEVICE + - _APP_STORAGE_S3_ACCESS_KEY + - _APP_STORAGE_S3_SECRET + - _APP_STORAGE_S3_REGION + - _APP_STORAGE_S3_BUCKET + - _APP_STORAGE_DO_SPACES_ACCESS_KEY + - _APP_STORAGE_DO_SPACES_SECRET + - _APP_STORAGE_DO_SPACES_REGION + - _APP_STORAGE_DO_SPACES_BUCKET + - _APP_STORAGE_BACKBLAZE_ACCESS_KEY + - _APP_STORAGE_BACKBLAZE_SECRET + - _APP_STORAGE_BACKBLAZE_REGION + - _APP_STORAGE_BACKBLAZE_BUCKET + - _APP_STORAGE_LINODE_ACCESS_KEY + - _APP_STORAGE_LINODE_SECRET + - _APP_STORAGE_LINODE_REGION + - _APP_STORAGE_LINODE_BUCKET + - _APP_STORAGE_WASABI_ACCESS_KEY + - _APP_STORAGE_WASABI_SECRET + - _APP_STORAGE_WASABI_REGION + - _APP_STORAGE_WASABI_BUCKET + - _APP_FUNCTIONS_SIZE_LIMIT + - _APP_FUNCTIONS_TIMEOUT + - _APP_FUNCTIONS_BUILD_TIMEOUT + - _APP_FUNCTIONS_CPUS + - _APP_FUNCTIONS_MEMORY + - _APP_FUNCTIONS_RUNTIMES + - _APP_EXECUTOR_SECRET + - _APP_EXECUTOR_HOST + - _APP_LOGGING_CONFIG + - _APP_MAINTENANCE_INTERVAL + - _APP_MAINTENANCE_DELAY + - _APP_MAINTENANCE_RETENTION_EXECUTION + - _APP_MAINTENANCE_RETENTION_CACHE + - _APP_MAINTENANCE_RETENTION_ABUSE + - _APP_MAINTENANCE_RETENTION_AUDIT + - _APP_MAINTENANCE_RETENTION_USAGE_HOURLY + - _APP_MAINTENANCE_RETENTION_SCHEDULES + - _APP_SMS_PROVIDER + - _APP_SMS_FROM + - _APP_GRAPHQL_MAX_BATCH_SIZE + - _APP_GRAPHQL_MAX_COMPLEXITY + - _APP_GRAPHQL_MAX_DEPTH + - _APP_VCS_GITHUB_APP_NAME + - _APP_VCS_GITHUB_PRIVATE_KEY + - _APP_VCS_GITHUB_APP_ID + - _APP_VCS_GITHUB_WEBHOOK_SECRET + - _APP_VCS_GITHUB_CLIENT_SECRET + - _APP_VCS_GITHUB_CLIENT_ID + - _APP_MIGRATIONS_FIREBASE_CLIENT_ID + - _APP_MIGRATIONS_FIREBASE_CLIENT_SECRET + - _APP_ASSISTANT_OPENAI_API_KEY + + appwrite-console: + image: appwrite/console:5.0.12 + container_name: appwrite-console + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + labels: + - "traefik.enable=true" + - "traefik.constraint-label-stack=appwrite" + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_LOCALE + - _APP_CONSOLE_WHITELIST_ROOT + - _APP_CONSOLE_WHITELIST_EMAILS + - _APP_CONSOLE_SESSION_ALERTS + - _APP_CONSOLE_WHITELIST_IPS + - _APP_CONSOLE_HOSTNAMES + - 
_APP_SYSTEM_EMAIL_NAME + - _APP_SYSTEM_EMAIL_ADDRESS + - _APP_EMAIL_SECURITY + - _APP_SYSTEM_RESPONSE_FORMAT + - _APP_OPTIONS_ABUSE + - _APP_OPTIONS_ROUTER_PROTECTION + - _APP_OPTIONS_FORCE_HTTPS + - _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS + - _APP_OPENSSL_KEY_V1 + - _APP_DOMAIN + - _APP_DOMAIN_TARGET + - _APP_DOMAIN_FUNCTIONS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_SMTP_HOST + - _APP_SMTP_PORT + - _APP_SMTP_SECURE + - _APP_SMTP_USERNAME + - _APP_SMTP_PASSWORD + - _APP_USAGE_STATS + - _APP_STORAGE_LIMIT + - _APP_STORAGE_PREVIEW_LIMIT + - _APP_STORAGE_ANTIVIRUS + - _APP_STORAGE_ANTIVIRUS_HOST + - _APP_STORAGE_ANTIVIRUS_PORT + - _APP_STORAGE_DEVICE + - _APP_STORAGE_S3_ACCESS_KEY + - _APP_STORAGE_S3_SECRET + - _APP_STORAGE_S3_REGION + - _APP_STORAGE_S3_BUCKET + - _APP_STORAGE_DO_SPACES_ACCESS_KEY + - _APP_STORAGE_DO_SPACES_SECRET + - _APP_STORAGE_DO_SPACES_REGION + - _APP_STORAGE_DO_SPACES_BUCKET + - _APP_STORAGE_BACKBLAZE_ACCESS_KEY + - _APP_STORAGE_BACKBLAZE_SECRET + - _APP_STORAGE_BACKBLAZE_REGION + - _APP_STORAGE_BACKBLAZE_BUCKET + - _APP_STORAGE_LINODE_ACCESS_KEY + - _APP_STORAGE_LINODE_SECRET + - _APP_STORAGE_LINODE_REGION + - _APP_STORAGE_LINODE_BUCKET + - _APP_STORAGE_WASABI_ACCESS_KEY + - _APP_STORAGE_WASABI_SECRET + - _APP_STORAGE_WASABI_REGION + - _APP_STORAGE_WASABI_BUCKET + + appwrite-realtime: + image: appwrite/appwrite:1.6.0 + entrypoint: realtime + container_name: appwrite-realtime + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - mariadb + - redis + labels: + - "traefik.enable=true" + - "traefik.constraint-label-stack=appwrite" + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPTIONS_ABUSE + - _APP_OPTIONS_ROUTER_PROTECTION + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_USAGE_STATS + - _APP_LOGGING_CONFIG + + appwrite-worker-audits: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-audits + <<: *x-logging + container_name: appwrite-worker-audits + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + + appwrite-worker-webhooks: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-webhooks + <<: *x-logging + container_name: appwrite-worker-webhooks + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_EMAIL_SECURITY + - _APP_SYSTEM_SECURITY_EMAIL_ADDRESS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_LOGGING_CONFIG + + appwrite-worker-deletes: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-deletes + <<: *x-logging + container_name: appwrite-worker-deletes + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + volumes: + - appwrite-uploads:/storage/uploads:rw + - appwrite-cache:/storage/cache:rw + - appwrite-functions:/storage/functions:rw 
+ - appwrite-builds:/storage/builds:rw + - appwrite-certificates:/storage/certificates:rw + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_STORAGE_DEVICE + - _APP_STORAGE_S3_ACCESS_KEY + - _APP_STORAGE_S3_SECRET + - _APP_STORAGE_S3_REGION + - _APP_STORAGE_S3_BUCKET + - _APP_STORAGE_DO_SPACES_ACCESS_KEY + - _APP_STORAGE_DO_SPACES_SECRET + - _APP_STORAGE_DO_SPACES_REGION + - _APP_STORAGE_DO_SPACES_BUCKET + - _APP_STORAGE_BACKBLAZE_ACCESS_KEY + - _APP_STORAGE_BACKBLAZE_SECRET + - _APP_STORAGE_BACKBLAZE_REGION + - _APP_STORAGE_BACKBLAZE_BUCKET + - _APP_STORAGE_LINODE_ACCESS_KEY + - _APP_STORAGE_LINODE_SECRET + - _APP_STORAGE_LINODE_REGION + - _APP_STORAGE_LINODE_BUCKET + - _APP_STORAGE_WASABI_ACCESS_KEY + - _APP_STORAGE_WASABI_SECRET + - _APP_STORAGE_WASABI_REGION + - _APP_STORAGE_WASABI_BUCKET + - _APP_LOGGING_CONFIG + - _APP_EXECUTOR_SECRET + - _APP_EXECUTOR_HOST + - _APP_MAINTENANCE_RETENTION_ABUSE + - _APP_MAINTENANCE_RETENTION_AUDIT + - _APP_MAINTENANCE_RETENTION_EXECUTION + + appwrite-worker-databases: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-databases + <<: *x-logging + container_name: appwrite-worker-databases + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + + appwrite-worker-builds: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-builds + <<: *x-logging + container_name: appwrite-worker-builds + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + volumes: + - appwrite-functions:/storage/functions:rw + - appwrite-builds:/storage/builds:rw + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_EXECUTOR_SECRET + - _APP_EXECUTOR_HOST + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + - _APP_VCS_GITHUB_APP_NAME + - _APP_VCS_GITHUB_PRIVATE_KEY + - _APP_VCS_GITHUB_APP_ID + - _APP_FUNCTIONS_TIMEOUT + - _APP_FUNCTIONS_BUILD_TIMEOUT + - _APP_FUNCTIONS_CPUS + - _APP_FUNCTIONS_MEMORY + - _APP_FUNCTIONS_SIZE_LIMIT + - _APP_OPTIONS_FORCE_HTTPS + - _APP_OPTIONS_FUNCTIONS_FORCE_HTTPS + - _APP_DOMAIN + - _APP_STORAGE_DEVICE + - _APP_STORAGE_S3_ACCESS_KEY + - _APP_STORAGE_S3_SECRET + - _APP_STORAGE_S3_REGION + - _APP_STORAGE_S3_BUCKET + - _APP_STORAGE_DO_SPACES_ACCESS_KEY + - _APP_STORAGE_DO_SPACES_SECRET + - _APP_STORAGE_DO_SPACES_REGION + - _APP_STORAGE_DO_SPACES_BUCKET + - _APP_STORAGE_BACKBLAZE_ACCESS_KEY + - _APP_STORAGE_BACKBLAZE_SECRET + - _APP_STORAGE_BACKBLAZE_REGION + - _APP_STORAGE_BACKBLAZE_BUCKET + - _APP_STORAGE_LINODE_ACCESS_KEY + - _APP_STORAGE_LINODE_SECRET + - _APP_STORAGE_LINODE_REGION + - _APP_STORAGE_LINODE_BUCKET + - _APP_STORAGE_WASABI_ACCESS_KEY + - _APP_STORAGE_WASABI_SECRET + - _APP_STORAGE_WASABI_REGION + - _APP_STORAGE_WASABI_BUCKET + + appwrite-worker-certificates: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-certificates + <<: *x-logging + container_name: appwrite-worker-certificates + restart: unless-stopped + networks: + - 
dokploy-network + depends_on: + - redis + - mariadb + volumes: + - appwrite-config:/storage/config:rw + - appwrite-certificates:/storage/certificates:rw + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_DOMAIN + - _APP_DOMAIN_TARGET + - _APP_DOMAIN_FUNCTIONS + - _APP_EMAIL_CERTIFICATES + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + + appwrite-worker-functions: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-functions + <<: *x-logging + container_name: appwrite-worker-functions + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + - openruntimes-executor + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_DOMAIN + - _APP_OPTIONS_FORCE_HTTPS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_FUNCTIONS_TIMEOUT + - _APP_FUNCTIONS_BUILD_TIMEOUT + - _APP_FUNCTIONS_CPUS + - _APP_FUNCTIONS_MEMORY + - _APP_EXECUTOR_SECRET + - _APP_EXECUTOR_HOST + - _APP_USAGE_STATS + - _APP_DOCKER_HUB_USERNAME + - _APP_DOCKER_HUB_PASSWORD + - _APP_LOGGING_CONFIG + + appwrite-worker-mails: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-mails + <<: *x-logging + container_name: appwrite-worker-mails + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_SYSTEM_EMAIL_NAME + - _APP_SYSTEM_EMAIL_ADDRESS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_SMTP_HOST + - _APP_SMTP_PORT + - _APP_SMTP_SECURE + - _APP_SMTP_USERNAME + - _APP_SMTP_PASSWORD + - _APP_LOGGING_CONFIG + + appwrite-worker-messaging: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-messaging + container_name: appwrite-worker-messaging + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + volumes: + - appwrite-uploads:/storage/uploads:rw + depends_on: + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + - _APP_SMS_FROM + - _APP_SMS_PROVIDER + - _APP_STORAGE_DEVICE + - _APP_STORAGE_S3_ACCESS_KEY + - _APP_STORAGE_S3_SECRET + - _APP_STORAGE_S3_REGION + - _APP_STORAGE_S3_BUCKET + - _APP_STORAGE_DO_SPACES_ACCESS_KEY + - _APP_STORAGE_DO_SPACES_SECRET + - _APP_STORAGE_DO_SPACES_REGION + - _APP_STORAGE_DO_SPACES_BUCKET + - _APP_STORAGE_BACKBLAZE_ACCESS_KEY + - _APP_STORAGE_BACKBLAZE_SECRET + - _APP_STORAGE_BACKBLAZE_REGION + - _APP_STORAGE_BACKBLAZE_BUCKET + - _APP_STORAGE_LINODE_ACCESS_KEY + - _APP_STORAGE_LINODE_SECRET + - _APP_STORAGE_LINODE_REGION + - _APP_STORAGE_LINODE_BUCKET + - _APP_STORAGE_WASABI_ACCESS_KEY + - _APP_STORAGE_WASABI_SECRET + - _APP_STORAGE_WASABI_REGION + - _APP_STORAGE_WASABI_BUCKET + + appwrite-worker-migrations: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-migrations + <<: *x-logging + container_name: appwrite-worker-migrations + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - mariadb + environment: + - _APP_ENV + - 
_APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_DOMAIN + - _APP_DOMAIN_TARGET + - _APP_EMAIL_SECURITY + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_LOGGING_CONFIG + - _APP_MIGRATIONS_FIREBASE_CLIENT_ID + - _APP_MIGRATIONS_FIREBASE_CLIENT_SECRET + + appwrite-task-maintenance: + image: appwrite/appwrite:1.6.0 + entrypoint: maintenance + <<: *x-logging + container_name: appwrite-task-maintenance + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_DOMAIN + - _APP_DOMAIN_TARGET + - _APP_DOMAIN_FUNCTIONS + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_MAINTENANCE_INTERVAL + - _APP_MAINTENANCE_RETENTION_EXECUTION + - _APP_MAINTENANCE_RETENTION_CACHE + - _APP_MAINTENANCE_RETENTION_ABUSE + - _APP_MAINTENANCE_RETENTION_AUDIT + - _APP_MAINTENANCE_RETENTION_USAGE_HOURLY + - _APP_MAINTENANCE_RETENTION_SCHEDULES + + appwrite-worker-usage: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-usage + container_name: appwrite-worker-usage + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - redis + - mariadb + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_USAGE_STATS + - _APP_LOGGING_CONFIG + - _APP_USAGE_AGGREGATION_INTERVAL + + appwrite-worker-usage-dump: + image: appwrite/appwrite:1.6.0 + entrypoint: worker-usage-dump + container_name: appwrite-worker-usage-dump + <<: *x-logging + networks: + - dokploy-network + depends_on: + - redis + - mariadb + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_USAGE_STATS + - _APP_LOGGING_CONFIG + - _APP_USAGE_AGGREGATION_INTERVAL + + appwrite-task-scheduler-functions: + image: appwrite/appwrite:1.6.0 + entrypoint: schedule-functions + container_name: appwrite-task-scheduler-functions + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - mariadb + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + + appwrite-task-scheduler-executions: + image: appwrite/appwrite:1.6.0 + entrypoint: schedule-executions + container_name: appwrite-task-scheduler-executions + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + depends_on: + - mariadb + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + + appwrite-task-scheduler-messages: + image: appwrite/appwrite:1.6.0 + entrypoint: schedule-messages + container_name: appwrite-task-scheduler-messages + <<: *x-logging + restart: unless-stopped + networks: + - 
dokploy-network + depends_on: + - mariadb + - redis + environment: + - _APP_ENV + - _APP_WORKER_PER_CORE + - _APP_OPENSSL_KEY_V1 + - _APP_REDIS_HOST + - _APP_REDIS_PORT + - _APP_REDIS_USER + - _APP_REDIS_PASS + - _APP_DB_HOST + - _APP_DB_PORT + - _APP_DB_SCHEMA + - _APP_DB_USER + - _APP_DB_PASS + + appwrite-assistant: + image: appwrite/assistant:0.4.0 + container_name: appwrite-assistant + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + environment: + - _APP_ASSISTANT_OPENAI_API_KEY + + openruntimes-executor: + container_name: openruntimes-executor + hostname: exc1 + <<: *x-logging + restart: unless-stopped + stop_signal: SIGINT + image: openruntimes/executor:0.6.11 + networks: + - dokploy-network + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - appwrite-builds:/storage/builds:rw + - appwrite-functions:/storage/functions:rw + - /tmp:/tmp:rw + environment: + - OPR_EXECUTOR_INACTIVE_TRESHOLD=$_APP_FUNCTIONS_INACTIVE_THRESHOLD + - OPR_EXECUTOR_MAINTENANCE_INTERVAL=$_APP_FUNCTIONS_MAINTENANCE_INTERVAL + - OPR_EXECUTOR_NETWORK=$_APP_FUNCTIONS_RUNTIMES_NETWORK + - OPR_EXECUTOR_DOCKER_HUB_USERNAME=$_APP_DOCKER_HUB_USERNAME + - OPR_EXECUTOR_DOCKER_HUB_PASSWORD=$_APP_DOCKER_HUB_PASSWORD + - OPR_EXECUTOR_ENV=$_APP_ENV + - OPR_EXECUTOR_RUNTIMES=$_APP_FUNCTIONS_RUNTIMES + - OPR_EXECUTOR_SECRET=$_APP_EXECUTOR_SECRET + - OPR_EXECUTOR_LOGGING_CONFIG=$_APP_LOGGING_CONFIG + - OPR_EXECUTOR_STORAGE_DEVICE=$_APP_STORAGE_DEVICE + - OPR_EXECUTOR_STORAGE_S3_ACCESS_KEY=$_APP_STORAGE_S3_ACCESS_KEY + - OPR_EXECUTOR_STORAGE_S3_SECRET=$_APP_STORAGE_S3_SECRET + - OPR_EXECUTOR_STORAGE_S3_REGION=$_APP_STORAGE_S3_REGION + - OPR_EXECUTOR_STORAGE_S3_BUCKET=$_APP_STORAGE_S3_BUCKET + - OPR_EXECUTOR_STORAGE_DO_SPACES_ACCESS_KEY=$_APP_STORAGE_DO_SPACES_ACCESS_KEY + - OPR_EXECUTOR_STORAGE_DO_SPACES_SECRET=$_APP_STORAGE_DO_SPACES_SECRET + - OPR_EXECUTOR_STORAGE_DO_SPACES_REGION=$_APP_STORAGE_DO_SPACES_REGION + - OPR_EXECUTOR_STORAGE_DO_SPACES_BUCKET=$_APP_STORAGE_DO_SPACES_BUCKET + - OPR_EXECUTOR_STORAGE_BACKBLAZE_ACCESS_KEY=$_APP_STORAGE_BACKBLAZE_ACCESS_KEY + - OPR_EXECUTOR_STORAGE_BACKBLAZE_SECRET=$_APP_STORAGE_BACKBLAZE_SECRET + - OPR_EXECUTOR_STORAGE_BACKBLAZE_REGION=$_APP_STORAGE_BACKBLAZE_REGION + - OPR_EXECUTOR_STORAGE_BACKBLAZE_BUCKET=$_APP_STORAGE_BACKBLAZE_BUCKET + - OPR_EXECUTOR_STORAGE_LINODE_ACCESS_KEY=$_APP_STORAGE_LINODE_ACCESS_KEY + - OPR_EXECUTOR_STORAGE_LINODE_SECRET=$_APP_STORAGE_LINODE_SECRET + - OPR_EXECUTOR_STORAGE_LINODE_REGION=$_APP_STORAGE_LINODE_REGION + - OPR_EXECUTOR_STORAGE_LINODE_BUCKET=$_APP_STORAGE_LINODE_BUCKET + - OPR_EXECUTOR_STORAGE_WASABI_ACCESS_KEY=$_APP_STORAGE_WASABI_ACCESS_KEY + - OPR_EXECUTOR_STORAGE_WASABI_SECRET=$_APP_STORAGE_WASABI_SECRET + - OPR_EXECUTOR_STORAGE_WASABI_REGION=$_APP_STORAGE_WASABI_REGION + - OPR_EXECUTOR_STORAGE_WASABI_BUCKET=$_APP_STORAGE_WASABI_BUCKET + + mariadb: + image: mariadb:10.11 + container_name: appwrite-mariadb + <<: *x-logging + restart: unless-stopped + networks: + - dokploy-network + volumes: + - appwrite-mariadb:/var/lib/mysql:rw + environment: + - MYSQL_ROOT_PASSWORD=${_APP_DB_ROOT_PASS} + - MYSQL_DATABASE=${_APP_DB_SCHEMA} + - MYSQL_USER=${_APP_DB_USER} + - MYSQL_PASSWORD=${_APP_DB_PASS} + - MARIADB_AUTO_UPGRADE=1 + command: "mysqld --innodb-flush-method=fsync" + + redis: + image: redis:7.2.4-alpine + container_name: appwrite-redis + <<: *x-logging + restart: unless-stopped + command: > + redis-server + --maxmemory 512mb + --maxmemory-policy allkeys-lru + --maxmemory-samples 5 + networks: + - dokploy-network + 
volumes: + - appwrite-redis:/data:rw + +# Uncomment and configure if ClamAV is needed +# clamav: +# image: appwrite/clamav:1.2.0 +# container_name: appwrite-clamav +# restart: unless-stopped +# networks: +# - dokploy-network +# volumes: +# - appwrite-uploads:/storage/uploads + +volumes: + appwrite-mariadb: + appwrite-redis: + appwrite-cache: + appwrite-uploads: + appwrite-certificates: + appwrite-functions: + appwrite-builds: + appwrite-config: + +networks: + dokploy-network: + external: true diff --git a/blueprints/appwrite/index.ts b/blueprints/appwrite/index.ts new file mode 100644 index 000000000..4e671324f --- /dev/null +++ b/blueprints/appwrite/index.ts @@ -0,0 +1,153 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { host: mainDomain, port: 80, serviceName: "appwrite", path: "/" }, + { + host: mainDomain, + port: 80, + serviceName: "appwrite-console", + path: "/console", + }, + { + host: mainDomain, + port: 80, + serviceName: "appwrite-realtime", + path: "/v1/realtime", + }, + ]; + + const envs = [ + "_APP_ENV=production", + "_APP_LOCALE=en", + "_APP_OPTIONS_ABUSE=enabled", + "_APP_OPTIONS_FORCE_HTTPS=disabled", + "_APP_OPTIONS_FUNCTIONS_FORCE_HTTPS=disabled", + "_APP_OPTIONS_ROUTER_PROTECTION=disabled", + "_APP_OPENSSL_KEY_V1=your-secret-key", + `_APP_DOMAIN=${mainDomain}`, + `_APP_DOMAIN_FUNCTIONS=${mainDomain}`, + `_APP_DOMAIN_TARGET=${mainDomain}`, + "_APP_CONSOLE_WHITELIST_ROOT=enabled", + "_APP_CONSOLE_WHITELIST_EMAILS=", + "_APP_CONSOLE_WHITELIST_IPS=", + "_APP_CONSOLE_HOSTNAMES=", + "_APP_SYSTEM_EMAIL_NAME=Appwrite", + "_APP_SYSTEM_EMAIL_ADDRESS=noreply@appwrite.io", + "_APP_SYSTEM_TEAM_EMAIL=team@appwrite.io", + "_APP_SYSTEM_RESPONSE_FORMAT=", + "_APP_SYSTEM_SECURITY_EMAIL_ADDRESS=certs@appwrite.io", + "_APP_EMAIL_SECURITY=", + "_APP_EMAIL_CERTIFICATES=", + "_APP_USAGE_STATS=enabled", + "_APP_LOGGING_PROVIDER=", + "_APP_LOGGING_CONFIG=", + "_APP_USAGE_AGGREGATION_INTERVAL=30", + "_APP_USAGE_TIMESERIES_INTERVAL=30", + "_APP_USAGE_DATABASE_INTERVAL=900", + "_APP_WORKER_PER_CORE=6", + "_APP_CONSOLE_SESSION_ALERTS=disabled", + "_APP_REDIS_HOST=redis", + "_APP_REDIS_PORT=6379", + "_APP_REDIS_USER=", + "_APP_REDIS_PASS=", + "_APP_DB_HOST=mariadb", + "_APP_DB_PORT=3306", + "_APP_DB_SCHEMA=appwrite", + "_APP_DB_USER=user", + "_APP_DB_PASS=password", + "_APP_DB_ROOT_PASS=rootsecretpassword", + "_APP_INFLUXDB_HOST=influxdb", + "_APP_INFLUXDB_PORT=8086", + "_APP_STATSD_HOST=telegraf", + "_APP_STATSD_PORT=8125", + "_APP_SMTP_HOST=", + "_APP_SMTP_PORT=", + "_APP_SMTP_SECURE=", + "_APP_SMTP_USERNAME=", + "_APP_SMTP_PASSWORD=", + "_APP_SMS_PROVIDER=", + "_APP_SMS_FROM=", + "_APP_STORAGE_LIMIT=30000000", + "_APP_STORAGE_PREVIEW_LIMIT=20000000", + "_APP_STORAGE_ANTIVIRUS=disabled", + "_APP_STORAGE_ANTIVIRUS_HOST=clamav", + "_APP_STORAGE_ANTIVIRUS_PORT=3310", + "_APP_STORAGE_DEVICE=local", + "_APP_STORAGE_S3_ACCESS_KEY=", + "_APP_STORAGE_S3_SECRET=", + "_APP_STORAGE_S3_REGION=us-east-1", + "_APP_STORAGE_S3_BUCKET=", + "_APP_STORAGE_DO_SPACES_ACCESS_KEY=", + "_APP_STORAGE_DO_SPACES_SECRET=", + "_APP_STORAGE_DO_SPACES_REGION=us-east-1", + "_APP_STORAGE_DO_SPACES_BUCKET=", + "_APP_STORAGE_BACKBLAZE_ACCESS_KEY=", + "_APP_STORAGE_BACKBLAZE_SECRET=", + "_APP_STORAGE_BACKBLAZE_REGION=us-west-004", + "_APP_STORAGE_BACKBLAZE_BUCKET=", + "_APP_STORAGE_LINODE_ACCESS_KEY=", + 
"_APP_STORAGE_LINODE_SECRET=", + "_APP_STORAGE_LINODE_REGION=eu-central-1", + "_APP_STORAGE_LINODE_BUCKET=", + "_APP_STORAGE_WASABI_ACCESS_KEY=", + "_APP_STORAGE_WASABI_SECRET=", + "_APP_STORAGE_WASABI_REGION=eu-central-1", + "_APP_STORAGE_WASABI_BUCKET=", + "_APP_FUNCTIONS_SIZE_LIMIT=30000000", + "_APP_FUNCTIONS_BUILD_SIZE_LIMIT=2000000000", + "_APP_FUNCTIONS_TIMEOUT=900", + "_APP_FUNCTIONS_BUILD_TIMEOUT=900", + "_APP_FUNCTIONS_CONTAINERS=10", + "_APP_FUNCTIONS_CPUS=0", + "_APP_FUNCTIONS_MEMORY=0", + "_APP_FUNCTIONS_MEMORY_SWAP=0", + "_APP_FUNCTIONS_RUNTIMES=node-16.0,php-8.0,python-3.9,ruby-3.0", + "_APP_EXECUTOR_SECRET=your-secret-key", + "_APP_EXECUTOR_HOST=http://exc1/v1", + "_APP_EXECUTOR_RUNTIME_NETWORK=appwrite_runtimes", + "_APP_FUNCTIONS_ENVS=node-16.0,php-7.4,python-3.9,ruby-3.0", + "_APP_FUNCTIONS_INACTIVE_THRESHOLD=60", + "DOCKERHUB_PULL_USERNAME=", + "DOCKERHUB_PULL_PASSWORD=", + "DOCKERHUB_PULL_EMAIL=", + "OPEN_RUNTIMES_NETWORK=appwrite_runtimes", + "_APP_FUNCTIONS_RUNTIMES_NETWORK=runtimes", + "_APP_DOCKER_HUB_USERNAME=", + "_APP_DOCKER_HUB_PASSWORD=", + "_APP_FUNCTIONS_MAINTENANCE_INTERVAL=3600", + "_APP_VCS_GITHUB_APP_NAME=", + "_APP_VCS_GITHUB_PRIVATE_KEY=", + "_APP_VCS_GITHUB_APP_ID=", + "_APP_VCS_GITHUB_CLIENT_ID=", + "_APP_VCS_GITHUB_CLIENT_SECRET=", + "_APP_VCS_GITHUB_WEBHOOK_SECRET=", + "_APP_MAINTENANCE_INTERVAL=86400", + "_APP_MAINTENANCE_DELAY=0", + "_APP_MAINTENANCE_RETENTION_CACHE=2592000", + "_APP_MAINTENANCE_RETENTION_EXECUTION=1209600", + "_APP_MAINTENANCE_RETENTION_AUDIT=1209600", + "_APP_MAINTENANCE_RETENTION_ABUSE=86400", + "_APP_MAINTENANCE_RETENTION_USAGE_HOURLY=8640000", + "_APP_MAINTENANCE_RETENTION_SCHEDULES=86400", + "_APP_GRAPHQL_MAX_BATCH_SIZE=10", + "_APP_GRAPHQL_MAX_COMPLEXITY=250", + "_APP_GRAPHQL_MAX_DEPTH=3", + "_APP_MIGRATIONS_FIREBASE_CLIENT_ID=", + "_APP_MIGRATIONS_FIREBASE_CLIENT_SECRET=", + "_APP_ASSISTANT_OPENAI_API_KEY=", + ]; + + return { + domains, + envs, + mounts: [], + }; +} diff --git a/blueprints/aptabase/docker-compose.yml b/blueprints/aptabase/docker-compose.yml new file mode 100644 index 000000000..dfde1caef --- /dev/null +++ b/blueprints/aptabase/docker-compose.yml @@ -0,0 +1,49 @@ +services: + aptabase_db: + image: postgres:15-alpine + restart: always + volumes: + - db-data:/var/lib/postgresql/data + environment: + POSTGRES_USER: aptabase + POSTGRES_PASSWORD: sTr0NGp4ssw0rd + + healthcheck: + test: ["CMD-SHELL", "pg_isready -U aptabase"] + interval: 10s + timeout: 5s + retries: 5 + + aptabase_events_db: + image: clickhouse/clickhouse-server:23.8.16.16-alpine + restart: always + volumes: + - events-db-data:/var/lib/clickhouse + environment: + CLICKHOUSE_USER: aptabase + CLICKHOUSE_PASSWORD: sTr0NGp4ssw0rd + ulimits: + nofile: + soft: 262144 + hard: 262144 + + healthcheck: + test: ["CMD-SHELL", "curl -f http://localhost:8123 || exit 1"] + interval: 10s + timeout: 5s + retries: 5 + + aptabase: + image: ghcr.io/aptabase/aptabase:main + restart: always + environment: + BASE_URL: http://${APTABASE_HOST} + AUTH_SECRET: ${AUTH_SECRET} + DATABASE_URL: Server=aptabase_db;Port=5432;User Id=aptabase;Password=sTr0NGp4ssw0rd;Database=aptabase + CLICKHOUSE_URL: Host=aptabase_events_db;Port=8123;Username=aptabase;Password=sTr0NGp4ssw0rd + +volumes: + db-data: + driver: local + events-db-data: + driver: local diff --git a/blueprints/aptabase/index.ts b/blueprints/aptabase/index.ts new file mode 100644 index 000000000..38b077ae8 --- /dev/null +++ b/blueprints/aptabase/index.ts @@ -0,0 +1,27 @@ +import { + type DomainSchema, + type Schema, + 
type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const authSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "aptabase", + }, + ]; + + const envs = [`APTABASE_HOST=${mainDomain}`, `AUTH_SECRET=${authSecret}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/baserow/docker-compose.yml b/blueprints/baserow/docker-compose.yml new file mode 100644 index 000000000..db588e83a --- /dev/null +++ b/blueprints/baserow/docker-compose.yml @@ -0,0 +1,10 @@ +version: "3.8" +services: + baserow: + image: baserow/baserow:1.25.2 + environment: + BASEROW_PUBLIC_URL: "http://${BASEROW_HOST}" + volumes: + - baserow_data:/baserow/data +volumes: + baserow_data: diff --git a/blueprints/baserow/index.ts b/blueprints/baserow/index.ts new file mode 100644 index 000000000..fa57417cc --- /dev/null +++ b/blueprints/baserow/index.ts @@ -0,0 +1,24 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainHost = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainHost, + port: 80, + serviceName: "baserow", + }, + ]; + const envs = [`BASEROW_HOST=${mainHost}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/blender/docker-compose.yml b/blueprints/blender/docker-compose.yml new file mode 100644 index 000000000..893f3deea --- /dev/null +++ b/blueprints/blender/docker-compose.yml @@ -0,0 +1,26 @@ +version: "3.8" + +services: + blender: + image: lscr.io/linuxserver/blender:latest + runtime: nvidia + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: all + capabilities: + - gpu + environment: + - NVIDIA_VISIBLE_DEVICES=all + - NVIDIA_DRIVER_CAPABILITIES=all + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - SUBFOLDER=/ #optional + ports: + - 3000 + - 3001 + restart: unless-stopped + shm_size: 1gb diff --git a/blueprints/blender/index.ts b/blueprints/blender/index.ts new file mode 100644 index 000000000..79508bed5 --- /dev/null +++ b/blueprints/blender/index.ts @@ -0,0 +1,34 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateHash, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const _mainServiceHash = generateHash(schema.projectName); + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "blender", + }, + ]; + + const envs = [ + "PUID=1000", + "PGID=1000", + "TZ=Etc/UTC", + "SUBFOLDER=/", + "NVIDIA_VISIBLE_DEVICES=all", + "NVIDIA_DRIVER_CAPABILITIES=all", + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/browserless/docker-compose.yml b/blueprints/browserless/docker-compose.yml new file mode 100644 index 000000000..11d6d95f6 --- /dev/null +++ b/blueprints/browserless/docker-compose.yml @@ -0,0 +1,16 @@ +services: + browserless: + image: ghcr.io/browserless/chromium:v2.23.0 + environment: + TOKEN: ${BROWSERLESS_TOKEN} + expose: + - 3000 + healthcheck: + test: + - CMD + - curl + - '-f' + - 'http://127.0.0.1:3000/docs' + interval: 2s + timeout: 10s + retries: 15 diff --git a/blueprints/browserless/index.ts b/blueprints/browserless/index.ts new file mode 100644 index 000000000..f922e8639 --- /dev/null +++ b/blueprints/browserless/index.ts @@ 
-0,0 +1,28 @@
+import {
+  type DomainSchema,
+  type Schema,
+  type Template,
+  generatePassword,
+  generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+  const mainHost = generateRandomDomain(schema);
+
+  const domains: DomainSchema[] = [
+    {
+      host: mainHost,
+      port: 3000,
+      serviceName: "browserless",
+    },
+  ];
+  const envs = [
+    `BROWSERLESS_HOST=${mainHost}`,
+    `BROWSERLESS_TOKEN=${generatePassword(16)}`,
+  ];
+
+  return {
+    envs,
+    domains,
+  };
+}
diff --git a/blueprints/budibase/docker-compose.yml b/blueprints/budibase/docker-compose.yml
new file mode 100644
index 000000000..d1d6744af
--- /dev/null
+++ b/blueprints/budibase/docker-compose.yml
@@ -0,0 +1,192 @@
+services:
+  apps:
+    image: budibase.docker.scarf.sh/budibase/apps:3.2.25
+    restart: unless-stopped
+
+    environment:
+      SELF_HOSTED: 1
+      LOG_LEVEL: info
+      PORT: 4002
+      INTERNAL_API_KEY: ${BB_INTERNAL_API_KEY}
+      API_ENCRYPTION_KEY: ${BB_API_ENCRYPTION_KEY}
+      JWT_SECRET: ${BB_JWT_SECRET}
+      MINIO_ACCESS_KEY: ${BB_MINIO_ACCESS_KEY}
+      MINIO_SECRET_KEY: ${BB_MINIO_SECRET_KEY}
+      MINIO_URL: http://minio:9000
+      REDIS_URL: redis:6379
+      REDIS_PASSWORD: ${BB_REDIS_PASSWORD}
+      WORKER_URL: http://worker:4003
+      COUCH_DB_USERNAME: budibase
+      COUCH_DB_PASSWORD: ${BB_COUCHDB_PASSWORD}
+      COUCH_DB_URL: http://budibase:${BB_COUCHDB_PASSWORD}@couchdb:5984
+      BUDIBASE_ENVIRONMENT: ${BUDIBASE_ENVIRONMENT:-PRODUCTION}
+      ENABLE_ANALYTICS: ${ENABLE_ANALYTICS:-true}
+      BB_ADMIN_USER_EMAIL: ''
+      BB_ADMIN_USER_PASSWORD: ''
+    depends_on:
+      worker:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    healthcheck:
+      test:
+        - CMD
+        - wget
+        - '--spider'
+        - '-qO-'
+        - 'http://localhost:4002/health'
+      interval: 15s
+      timeout: 15s
+      retries: 5
+      start_period: 10s
+  worker:
+    image: budibase.docker.scarf.sh/budibase/worker:3.2.25
+    restart: unless-stopped
+
+    environment:
+      SELF_HOSTED: 1
+      LOG_LEVEL: info
+      PORT: 4003
+      CLUSTER_PORT: 10000
+      INTERNAL_API_KEY: ${BB_INTERNAL_API_KEY}
+      API_ENCRYPTION_KEY: ${BB_API_ENCRYPTION_KEY}
+      JWT_SECRET: ${BB_JWT_SECRET}
+      MINIO_ACCESS_KEY: ${BB_MINIO_ACCESS_KEY}
+      MINIO_SECRET_KEY: ${BB_MINIO_SECRET_KEY}
+      APPS_URL: http://apps:4002
+      MINIO_URL: http://minio:9000
+      REDIS_URL: redis:6379
+      REDIS_PASSWORD: ${BB_REDIS_PASSWORD}
+      COUCH_DB_USERNAME: budibase
+      COUCH_DB_PASSWORD: ${BB_COUCHDB_PASSWORD}
+      COUCH_DB_URL: http://budibase:${BB_COUCHDB_PASSWORD}@couchdb:5984
+      BUDIBASE_ENVIRONMENT: ${BUDIBASE_ENVIRONMENT:-PRODUCTION}
+      ENABLE_ANALYTICS: ${ENABLE_ANALYTICS:-true}
+    depends_on:
+      redis:
+        condition: service_healthy
+      minio:
+        condition: service_healthy
+    healthcheck:
+      test:
+        - CMD
+        - wget
+        - '--spider'
+        - '-qO-'
+        - 'http://localhost:4003/health'
+      interval: 15s
+      timeout: 15s
+      retries: 5
+      start_period: 10s
+  minio:
+    image: minio/minio:RELEASE.2024-11-07T00-52-20Z
+    restart: unless-stopped
+
+    volumes:
+      - 'minio_data:/data'
+    environment:
+      MINIO_ROOT_USER: ${BB_MINIO_ACCESS_KEY}
+      MINIO_ROOT_PASSWORD: ${BB_MINIO_SECRET_KEY}
+      MINIO_BROWSER: off
+    command: 'server /data --console-address ":9001"'
+    healthcheck:
+      test:
+        - CMD
+        - curl
+        - '-f'
+        - 'http://localhost:9000/minio/health/live'
+      interval: 30s
+      timeout: 20s
+      retries: 3
+  proxy:
+    image: budibase/proxy:3.2.25
+    restart: unless-stopped
+
+    environment:
+      PROXY_RATE_LIMIT_WEBHOOKS_PER_SECOND: 10
+      PROXY_RATE_LIMIT_API_PER_SECOND: 20
+      APPS_UPSTREAM_URL: http://apps:4002
+      WORKER_UPSTREAM_URL: http://worker:4003
+      MINIO_UPSTREAM_URL: http://minio:9000
+      COUCHDB_UPSTREAM_URL: http://couchdb:5984
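+      # Upstream hostnames resolve through Docker's embedded DNS (see RESOLVER
+      # below); Watchtower is proxied so its HTTP update API shares this entry point.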
+ WATCHTOWER_UPSTREAM_URL: http://watchtower:8080 + RESOLVER: 127.0.0.11 + depends_on: + minio: + condition: service_healthy + worker: + condition: service_healthy + apps: + condition: service_healthy + couchdb: + condition: service_healthy + healthcheck: + test: + - CMD + - curl + - '-f' + - 'http://localhost:10000/' + interval: 15s + timeout: 15s + retries: 5 + start_period: 10s + couchdb: + image: budibase/couchdb:v3.3.3 + restart: unless-stopped + + environment: + COUCHDB_USER: budibase + COUCHDB_PASSWORD: ${BB_COUCHDB_PASSWORD} + TARGETBUILD: docker-compose + healthcheck: + test: + - CMD + - curl + - '-f' + - 'http://localhost:5984/' + interval: 15s + timeout: 15s + retries: 5 + start_period: 10s + volumes: + - 'couchdb3_data:/opt/couchdb/data' + redis: + image: redis:7.2-alpine + + restart: unless-stopped + command: 'redis-server --requirepass "${BB_REDIS_PASSWORD}"' + volumes: + - 'redis_data:/data' + healthcheck: + test: + - CMD + - redis-cli + - '-a' + - ${BB_REDIS_PASSWORD} + - ping + interval: 15s + timeout: 15s + retries: 5 + start_period: 10s + watchtower: + restart: unless-stopped + + image: containrrr/watchtower:1.7.1 + volumes: + - '/var/run/docker.sock:/var/run/docker.sock' + command: '--debug --http-api-update bbapps bbworker bbproxy' + environment: + WATCHTOWER_HTTP_API: true + WATCHTOWER_HTTP_API_TOKEN: ${BB_WATCHTOWER_PASSWORD} + WATCHTOWER_CLEANUP: true + labels: + - com.centurylinklabs.watchtower.enable=false + +networks: + dokploy-network: + external: true + +volumes: + minio_data: + couchdb3_data: + redis_data: \ No newline at end of file diff --git a/blueprints/budibase/index.ts b/blueprints/budibase/index.ts new file mode 100644 index 000000000..50bdfdba7 --- /dev/null +++ b/blueprints/budibase/index.ts @@ -0,0 +1,45 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const apiKey = generatePassword(32); + const encryptionKey = generatePassword(32); + const jwtSecret = generatePassword(32); + const couchDbPassword = generatePassword(32); + const redisPassword = generatePassword(32); + const minioAccessKey = generatePassword(32); + const minioSecretKey = generatePassword(32); + const watchtowerPassword = generatePassword(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 10000, + serviceName: "proxy", + }, + ]; + + const envs = [ + `BB_HOST=${mainDomain}`, + `BB_INTERNAL_API_KEY=${apiKey}`, + `BB_API_ENCRYPTION_KEY=${encryptionKey}`, + `BB_JWT_SECRET=${jwtSecret}`, + `BB_COUCHDB_PASSWORD=${couchDbPassword}`, + `BB_REDIS_PASSWORD=${redisPassword}`, + `BB_WATCHTOWER_PASSWORD=${watchtowerPassword}`, + `BB_MINIO_ACCESS_KEY=${minioAccessKey}`, + `BB_MINIO_SECRET_KEY=${minioSecretKey}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/calcom/docker-compose.yml b/blueprints/calcom/docker-compose.yml new file mode 100644 index 000000000..a309a1da4 --- /dev/null +++ b/blueprints/calcom/docker-compose.yml @@ -0,0 +1,25 @@ +services: + postgres: + image: postgres:16-alpine + + volumes: + - calcom-data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=password + - POSTGRES_DB=db + - DATABASE_URL=postgres://postgres:password@postgres:5432/db + + calcom: + image: calcom/cal.com:v2.7.6 + depends_on: + - postgres + environment: + - NEXTAUTH_SECRET=${NEXTAUTH_SECRET} + - 
CALENDSO_ENCRYPTION_KEY=${CALENDSO_ENCRYPTION_KEY} + - DATABASE_URL=postgres://postgres:password@postgres:5432/db + - NEXT_PUBLIC_WEBAPP_URL=http://${CALCOM_HOST} + - NEXTAUTH_URL=http://${CALCOM_HOST}/api/auth + +volumes: + calcom-data: diff --git a/blueprints/calcom/index.ts b/blueprints/calcom/index.ts new file mode 100644 index 000000000..d359e9c7c --- /dev/null +++ b/blueprints/calcom/index.ts @@ -0,0 +1,32 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const calcomEncryptionKey = generateBase64(32); + const nextAuthSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "calcom", + }, + ]; + + const envs = [ + `CALCOM_HOST=${mainDomain}`, + `NEXTAUTH_SECRET=${nextAuthSecret}`, + `CALENDSO_ENCRYPTION_KEY=${calcomEncryptionKey}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/chatwoot/docker-compose.yml b/blueprints/chatwoot/docker-compose.yml new file mode 100644 index 000000000..b24ca0b56 --- /dev/null +++ b/blueprints/chatwoot/docker-compose.yml @@ -0,0 +1,74 @@ +version: '3' + +x-base-config: &base-config + image: chatwoot/chatwoot:v3.14.1 + volumes: + - chatwoot-storage:/app/storage + networks: + - dokploy-network + environment: + - FRONTEND_URL=${FRONTEND_URL} + - SECRET_KEY_BASE=${SECRET_KEY_BASE} + - RAILS_ENV=${RAILS_ENV} + - NODE_ENV=${NODE_ENV} + - INSTALLATION_ENV=${INSTALLATION_ENV} + - RAILS_LOG_TO_STDOUT=${RAILS_LOG_TO_STDOUT} + - LOG_LEVEL=${LOG_LEVEL} + - DEFAULT_LOCALE=${DEFAULT_LOCALE} + - POSTGRES_HOST=${POSTGRES_HOST} + - POSTGRES_PORT=${POSTGRES_PORT} + - POSTGRES_DATABASE=${POSTGRES_DATABASE} + - POSTGRES_USERNAME=${POSTGRES_USERNAME} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - REDIS_URL=${REDIS_URL} + - ENABLE_ACCOUNT_SIGNUP=${ENABLE_ACCOUNT_SIGNUP} + - ACTIVE_STORAGE_SERVICE=${ACTIVE_STORAGE_SERVICE} + +services: + chatwoot-rails: + <<: *base-config + depends_on: + chatwoot-postgres: + condition: service_started + chatwoot-redis: + condition: service_started + entrypoint: docker/entrypoints/rails.sh + command: ['bundle', 'exec', 'sh', '-c', 'rails db:chatwoot_prepare && rails s -p 3000 -b 0.0.0.0'] + restart: always + + chatwoot-sidekiq: + <<: *base-config + depends_on: + chatwoot-postgres: + condition: service_started + chatwoot-redis: + condition: service_started + command: ['bundle', 'exec', 'sidekiq', '-C', 'config/sidekiq.yml'] + restart: always + + chatwoot-postgres: + image: postgres:12 + restart: always + volumes: + - chatwoot-postgres-data:/var/lib/postgresql/data + + environment: + - POSTGRES_DB=${POSTGRES_DATABASE} + - POSTGRES_USER=${POSTGRES_USERNAME} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + + chatwoot-redis: + image: redis:alpine + restart: always + volumes: + - chatwoot-redis-data:/data + + +networks: + dokploy-network: + external: true + +volumes: + chatwoot-storage: + chatwoot-postgres-data: + chatwoot-redis-data: \ No newline at end of file diff --git a/blueprints/chatwoot/index.ts b/blueprints/chatwoot/index.ts new file mode 100644 index 000000000..9c5e44c6b --- /dev/null +++ b/blueprints/chatwoot/index.ts @@ -0,0 +1,46 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = 
generateRandomDomain(schema); + const secretKeyBase = generateBase64(64); + const postgresPassword = generatePassword(); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "chatwoot-rails", + }, + ]; + + const envs = [ + `FRONTEND_URL=http://${mainDomain}`, + `SECRET_KEY_BASE=${secretKeyBase}`, + "RAILS_ENV=production", + "NODE_ENV=production", + "INSTALLATION_ENV=docker", + "RAILS_LOG_TO_STDOUT=true", + "LOG_LEVEL=info", + "DEFAULT_LOCALE=en", + "POSTGRES_HOST=chatwoot-postgres", + "POSTGRES_PORT=5432", + "POSTGRES_DATABASE=chatwoot", + "POSTGRES_USERNAME=postgres", + `POSTGRES_PASSWORD=${postgresPassword}`, + "REDIS_URL=redis://chatwoot-redis:6379", + "ENABLE_ACCOUNT_SIGNUP=false", + "ACTIVE_STORAGE_SERVICE=local", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/checkmate/docker-compose.yml b/blueprints/checkmate/docker-compose.yml new file mode 100644 index 000000000..7a5fc8984 --- /dev/null +++ b/blueprints/checkmate/docker-compose.yml @@ -0,0 +1,42 @@ +services: + client: + image: bluewaveuptime/uptime_client:latest + restart: always + environment: + UPTIME_APP_API_BASE_URL: "http://${DOMAIN}/api/v1" + ports: + - 80 + - 443 + depends_on: + - server + + server: + image: bluewaveuptime/uptime_server:latest + restart: always + ports: + - 5000 + depends_on: + - redis + - mongodb + environment: + - DB_CONNECTION_STRING=mongodb://mongodb:27017/uptime_db + - REDIS_HOST=redis + + # volumes: + # - /var/run/docker.sock:/var/run/docker.sock:ro + redis: + image: bluewaveuptime/uptime_redis:latest + restart: always + ports: + - 6379 + volumes: + - ../files/redis/data:/data + + mongodb: + image: bluewaveuptime/uptime_database_mongo:latest + restart: always + volumes: + - ../files/mongo/data:/data/db + command: ["mongod", "--quiet"] + ports: + - 27017 diff --git a/blueprints/checkmate/index.ts b/blueprints/checkmate/index.ts new file mode 100644 index 000000000..e52370aae --- /dev/null +++ b/blueprints/checkmate/index.ts @@ -0,0 +1,25 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const envs = [`DOMAIN=${mainDomain}`]; + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "client", + }, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/cloudflared/docker-compose.yml b/blueprints/cloudflared/docker-compose.yml new file mode 100644 index 000000000..d9fc27419 --- /dev/null +++ b/blueprints/cloudflared/docker-compose.yml @@ -0,0 +1,18 @@ +services: + cloudflared: + image: 'cloudflare/cloudflared:latest' + environment: + # Don't forget to set this in your Dokploy Environment + - 'TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN}' + network_mode: host + restart: unless-stopped + command: [ + "tunnel", + + # More tunnel run parameters here: + # https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/configure-tunnels/tunnel-run-parameters/ + "--no-autoupdate", + #"--protocol", "http2", + + "run" + ] diff --git a/blueprints/cloudflared/index.ts b/blueprints/cloudflared/index.ts new file mode 100644 index 000000000..93ea091c6 --- /dev/null +++ b/blueprints/cloudflared/index.ts @@ -0,0 +1,9 @@ +import type { Schema, Template } from "../utils"; + +export function generate(_schema: Schema): Template { + const envs = [`CLOUDFLARE_TUNNEL_TOKEN=""`]; + + return { + envs, + }; +} diff --git 
a/blueprints/coder/docker-compose.yml b/blueprints/coder/docker-compose.yml
new file mode 100644
index 000000000..875c7ae81
--- /dev/null
+++ b/blueprints/coder/docker-compose.yml
@@ -0,0 +1,37 @@
+services:
+  coder:
+    image: ghcr.io/coder/coder:v2.15.3
+
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+    group_add:
+      - "998"
+    depends_on:
+      db:
+        condition: service_healthy
+    environment:
+      - CODER_ACCESS_URL
+      - CODER_HTTP_ADDRESS
+      - CODER_PG_CONNECTION_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db/${POSTGRES_DB}?sslmode=disable
+
+  db:
+    image: postgres:17
+
+    environment:
+      - POSTGRES_PASSWORD
+      - POSTGRES_USER
+      - POSTGRES_DB
+    healthcheck:
+      test:
+        [
+          "CMD-SHELL",
+          "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}",
+        ]
+      interval: 5s
+      timeout: 5s
+      retries: 5
+    volumes:
+      - db_coder_data:/var/lib/postgresql/data
+
+volumes:
+  db_coder_data:
\ No newline at end of file
diff --git a/blueprints/coder/index.ts b/blueprints/coder/index.ts
new file mode 100644
index 000000000..c3f066d63
--- /dev/null
+++ b/blueprints/coder/index.ts
@@ -0,0 +1,30 @@
+import {
+  type DomainSchema,
+  type Schema,
+  type Template,
+  generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+  const domains: DomainSchema[] = [
+    {
+      host: generateRandomDomain(schema),
+      port: 7080,
+      serviceName: "coder",
+    },
+  ];
+
+  const envs = [
+    "CODER_ACCESS_URL=",
+    "CODER_HTTP_ADDRESS=0.0.0.0:7080",
+    "",
+    "POSTGRES_DB=coder",
+    "POSTGRES_USER=coder",
+    "POSTGRES_PASSWORD=VERY_STRONG_PASSWORD",
+  ];
+
+  return {
+    domains,
+    envs,
+  };
+}
diff --git a/blueprints/conduit/docker-compose.yml b/blueprints/conduit/docker-compose.yml
new file mode 100644
index 000000000..f2f1fb340
--- /dev/null
+++ b/blueprints/conduit/docker-compose.yml
@@ -0,0 +1,31 @@
+# From Conduit's official documentation: https://docs.conduit.rs/deploying/docker.html#docker-compose
+version: '3'
+
+services:
+  homeserver:
+    image: registry.gitlab.com/famedly/conduit/matrix-conduit:v0.9.0
+    restart: unless-stopped
+    volumes:
+      - db:/var/lib/matrix-conduit/
+    networks:
+      - dokploy-network
+    environment:
+      CONDUIT_SERVER_NAME: ${MATRIX_SUBDOMAIN}
+      CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
+      CONDUIT_DATABASE_BACKEND: rocksdb
+      CONDUIT_PORT: 6167
+      CONDUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB
+      CONDUIT_ALLOW_REGISTRATION: 'true'
+      #CONDUIT_REGISTRATION_TOKEN: '' # require a token for registration
+      CONDUIT_ALLOW_FEDERATION: 'true'
+      CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
+      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
+      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
+      CONDUIT_ADDRESS: 0.0.0.0
+      CONDUIT_CONFIG: '' # Ignore this
+volumes:
+  db:
+
+networks:
+  dokploy-network:
+    external: true
diff --git a/blueprints/conduit/index.ts b/blueprints/conduit/index.ts
new file mode 100644
index 000000000..2b9a81ae9
--- /dev/null
+++ b/blueprints/conduit/index.ts
@@ -0,0 +1,27 @@
+import {
+  type DomainSchema,
+  type Schema,
+  type Template,
+  generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+  const matrixSubdomain = generateRandomDomain(schema);
+
+  const domains: DomainSchema[] = [
+    {
+      host: matrixSubdomain,
+      port: 6167,
+      serviceName: "homeserver",
+    },
+  ];
+
+  const envs = [
+    `MATRIX_SUBDOMAIN=${matrixSubdomain} # Replace with your server name`,
+  ];
+
+  return {
+    envs,
+    domains,
+  };
+}
diff --git a/blueprints/conduwuit/docker-compose.yml b/blueprints/conduwuit/docker-compose.yml
new file mode 100644
index
000000000..7945d6c97 --- /dev/null +++ b/blueprints/conduwuit/docker-compose.yml @@ -0,0 +1,48 @@ +# conduwuit +# https://conduwuit.puppyirl.gay/deploying/docker-compose.yml + +services: + homeserver: + image: girlbossceo/conduwuit:latest + restart: unless-stopped + ports: + - 8448:6167 + volumes: + - db:/var/lib/conduwuit + #- ./conduwuit.toml:/etc/conduwuit.toml + environment: + # Edit this in your Dokploy Environment + CONDUWUIT_SERVER_NAME: ${CONDUWUIT_SERVER_NAME} + + CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit + CONDUWUIT_PORT: 6167 + CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + + CONDUWUIT_ALLOW_REGISTRATION: 'true' + CONDUWUIT_REGISTRATION_TOKEN: ${CONDUWUIT_REGISTRATION_TOKEN} + + CONDUWUIT_ALLOW_FEDERATION: 'true' + CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' + CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUWUIT_LOG: warn,state_res=warn + CONDUWUIT_ADDRESS: 0.0.0.0 + + # Uncomment if you mapped config toml in volumes + #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' + + ### Uncomment if you want to use your own Element-Web App. + ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and conduwuit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/element-web:latest + # restart: unless-stopped + # ports: + # - 8009:80 + # volumes: + # - ./element_config.json:/app/config.json + # depends_on: + # - homeserver + +volumes: + db: diff --git a/blueprints/conduwuit/index.ts b/blueprints/conduwuit/index.ts new file mode 100644 index 000000000..9d9e98569 --- /dev/null +++ b/blueprints/conduwuit/index.ts @@ -0,0 +1,30 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const matrixSubdomain = generateRandomDomain(schema); + const registrationToken = generatePassword(20); + + const domains: DomainSchema[] = [ + { + host: matrixSubdomain, + port: 6167, + serviceName: "homeserver", + }, + ]; + + const envs = [ + `CONDUWUIT_SERVER_NAME=${matrixSubdomain}`, + `CONDUWUIT_REGISTRATION_TOKEN=${registrationToken}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/convex/docker-compose.yml b/blueprints/convex/docker-compose.yml new file mode 100644 index 000000000..12e2b5ada --- /dev/null +++ b/blueprints/convex/docker-compose.yml @@ -0,0 +1,37 @@ +services: + backend: + image: ghcr.io/get-convex/convex-backend:6c974d219776b753cd23d26f4a296629ff7c2cad + ports: + - "${PORT:-3210}:3210" + - "${SITE_PROXY_PORT:-3211}:3211" + volumes: + - data:/convex/data + environment: + - INSTANCE_NAME=${INSTANCE_NAME:-} + - INSTANCE_SECRET=${INSTANCE_SECRET:-} + - CONVEX_RELEASE_VERSION_DEV=${CONVEX_RELEASE_VERSION_DEV:-} + - ACTIONS_USER_TIMEOUT_SECS=${ACTIONS_USER_TIMEOUT_SECS:-} + - CONVEX_CLOUD_ORIGIN=${CONVEX_CLOUD_ORIGIN:-http://127.0.0.1:3210} + - CONVEX_SITE_ORIGIN=${CONVEX_SITE_ORIGIN:-http://127.0.0.1:3211} + - DATABASE_URL=${DATABASE_URL:-} + - DISABLE_BEACON=${DISABLE_BEACON:-} + - REDACT_LOGS_TO_CLIENT=${REDACT_LOGS_TO_CLIENT:-} + - RUST_LOG=${RUST_LOG:-info} + - RUST_BACKTRACE=${RUST_BACKTRACE:-} + healthcheck: + test: curl -f http://localhost:3210/version + interval: 5s + start_period: 5s + + dashboard: + image: ghcr.io/get-convex/convex-dashboard:4499dd4fd7f2148687a7774599c613d052950f46 + ports: + - "${DASHBOARD_PORT:-6791}:6791" + environment: + - 
NEXT_PUBLIC_DEPLOYMENT_URL=${NEXT_PUBLIC_DEPLOYMENT_URL:-http://127.0.0.1:3210} + depends_on: + backend: + condition: service_healthy + +volumes: + data: diff --git a/blueprints/convex/index.ts b/blueprints/convex/index.ts new file mode 100644 index 000000000..badfe7320 --- /dev/null +++ b/blueprints/convex/index.ts @@ -0,0 +1,38 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const dashboardDomain = generateRandomDomain(schema); + const backendDomain = generateRandomDomain(schema); + const actionsDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: dashboardDomain, + port: 6791, + serviceName: "dashboard", + }, + { + host: backendDomain, + port: 3210, + serviceName: "backend", + }, + { + host: actionsDomain, + port: 3211, + serviceName: "backend", + }, + ]; + + const envs = [ + `NEXT_PUBLIC_DEPLOYMENT_URL=http://${backendDomain}`, + `CONVEX_CLOUD_ORIGIN=http://${backendDomain}`, + `CONVEX_SITE_ORIGIN=http://${actionsDomain}`, + ]; + + return { envs, domains }; +} diff --git a/blueprints/couchdb/docker-compose.yml b/blueprints/couchdb/docker-compose.yml new file mode 100644 index 000000000..cb00bf69d --- /dev/null +++ b/blueprints/couchdb/docker-compose.yml @@ -0,0 +1,17 @@ +version: '3.8' + +services: + couchdb: + image: couchdb:latest + ports: + - '5984' + volumes: + - couchdb-data:/opt/couchdb/data + environment: + - COUCHDB_USER=${COUCHDB_USER} + - COUCHDB_PASSWORD=${COUCHDB_PASSWORD} + restart: unless-stopped + +volumes: + couchdb-data: + driver: local diff --git a/blueprints/couchdb/index.ts b/blueprints/couchdb/index.ts new file mode 100644 index 000000000..70d716695 --- /dev/null +++ b/blueprints/couchdb/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const username = generatePassword(16); + const password = generatePassword(32); + + const domains: DomainSchema[] = [ + { + serviceName: "couchdb", + host: mainDomain, + port: 5984, + }, + ]; + + const envs = [`COUCHDB_USER=${username}`, `COUCHDB_PASSWORD=${password}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/datalens/docker-compose.yml b/blueprints/datalens/docker-compose.yml new file mode 100644 index 000000000..94839e04b --- /dev/null +++ b/blueprints/datalens/docker-compose.yml @@ -0,0 +1,96 @@ +services: + pg-compeng: + image: postgres:16-alpine + restart: always + environment: + POSTGRES_PASSWORD: "postgres" + POSTGRES_DB: postgres + POSTGRES_USER: postgres + + control-api: + image: ghcr.io/datalens-tech/datalens-control-api:0.2192.0 + restart: always + environment: + BI_API_UWSGI_WORKERS_COUNT: 4 + CONNECTOR_AVAILABILITY_VISIBLE: "clickhouse,postgres,chyt,ydb,mysql,greenplum,mssql,appmetrica_api,metrika_api" + RQE_FORCE_OFF: 1 + DL_CRY_ACTUAL_KEY_ID: key_1 + DL_CRY_KEY_VAL_ID_key_1: "h1ZpilcYLYRdWp7Nk8X1M1kBPiUi8rdjz9oBfHyUKIk=" + RQE_SECRET_KEY: "" + US_HOST: "http://us:8083" + US_MASTER_TOKEN: "fake-us-master-token" + depends_on: + - us + + data-api: + container_name: datalens-data-api + image: ghcr.io/datalens-tech/datalens-data-api:0.2192.0 + restart: always + environment: + GUNICORN_WORKERS_COUNT: 5 + RQE_FORCE_OFF: 1 + CACHES_ON: 0 + MUTATIONS_CACHES_ON: 0 + RQE_SECRET_KEY: "" + DL_CRY_ACTUAL_KEY_ID: key_1 + DL_CRY_KEY_VAL_ID_key_1: 
"h1ZpilcYLYRdWp7Nk8X1M1kBPiUi8rdjz9oBfHyUKIk=" + BI_COMPENG_PG_ON: 1 + BI_COMPENG_PG_URL: "postgresql://postgres:postgres@pg-compeng:5432/postgres" + US_HOST: "http://us:8083" + US_MASTER_TOKEN: "fake-us-master-token" + depends_on: + - us + - pg-compeng + + pg-us: + container_name: datalens-pg-us + image: postgres:16-alpine + restart: always + environment: + POSTGRES_DB: us-db-ci_purgeable + POSTGRES_USER: us + POSTGRES_PASSWORD: us + volumes: + - ${VOLUME_US:-./metadata}:/var/lib/postgresql/data + + us: + image: ghcr.io/datalens-tech/datalens-us:0.310.0 + restart: always + depends_on: + - pg-us + environment: + APP_INSTALLATION: "opensource" + APP_ENV: "prod" + MASTER_TOKEN: "fake-us-master-token" + POSTGRES_DSN_LIST: ${METADATA_POSTGRES_DSN_LIST:-postgres://us:us@pg-us:5432/us-db-ci_purgeable} + SKIP_INSTALL_DB_EXTENSIONS: ${METADATA_SKIP_INSTALL_DB_EXTENSIONS:-0} + USE_DEMO_DATA: ${USE_DEMO_DATA:-0} + HC: ${HC:-0} + NODE_EXTRA_CA_CERTS: /certs/root.crt + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - ./certs:/certs + + datalens: + image: ghcr.io/datalens-tech/datalens-ui:0.2601.0 + restart: always + ports: + - ${UI_PORT:-8080}:8080 + depends_on: + - us + - control-api + - data-api + environment: + APP_MODE: "full" + APP_ENV: "production" + APP_INSTALLATION: "opensource" + AUTH_POLICY: "disabled" + US_ENDPOINT: "http://us:8083" + BI_API_ENDPOINT: "http://control-api:8080" + BI_DATA_ENDPOINT: "http://data-api:8080" + US_MASTER_TOKEN: "fake-us-master-token" + NODE_EXTRA_CA_CERTS: "/usr/local/share/ca-certificates/cert.pem" + HC: ${HC:-0} + YANDEX_MAP_ENABLED: ${YANDEX_MAP_ENABLED:-0} + YANDEX_MAP_TOKEN: ${YANDEX_MAP_TOKEN:-0} diff --git a/blueprints/datalens/index.ts b/blueprints/datalens/index.ts new file mode 100644 index 000000000..f2efb76b3 --- /dev/null +++ b/blueprints/datalens/index.ts @@ -0,0 +1,23 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 8080, + serviceName: "datalens", + }, + ]; + + const envs = ["HC=1"]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/directus/docker-compose.yml b/blueprints/directus/docker-compose.yml new file mode 100644 index 000000000..52e64baf6 --- /dev/null +++ b/blueprints/directus/docker-compose.yml @@ -0,0 +1,64 @@ +services: + database: + image: postgis/postgis:13-master + volumes: + - directus_database:/var/lib/postgresql/data + + environment: + POSTGRES_USER: "directus" + POSTGRES_PASSWORD: ${DATABASE_PASSWORD} + POSTGRES_DB: "directus" + healthcheck: + test: ["CMD", "pg_isready", "--host=localhost", "--username=directus"] + interval: 10s + timeout: 5s + retries: 5 + start_interval: 5s + start_period: 30s + + cache: + image: redis:6 + healthcheck: + test: ["CMD-SHELL", "[ $$(redis-cli ping) = 'PONG' ]"] + interval: 10s + timeout: 5s + retries: 5 + start_interval: 5s + start_period: 30s + + + directus: + image: directus/directus:11.0.2 + ports: + - 8055 + volumes: + - directus_uploads:/directus/uploads + - directus_extensions:/directus/extensions + depends_on: + database: + condition: service_healthy + cache: + condition: service_healthy + environment: + SECRET: ${DIRECTUS_SECRET} + + DB_CLIENT: "pg" + DB_HOST: "database" + DB_PORT: "5432" + DB_DATABASE: "directus" + DB_USER: "directus" + DB_PASSWORD: ${DATABASE_PASSWORD} + + CACHE_ENABLED: "true" + CACHE_AUTO_PURGE: "true" + CACHE_STORE: "redis" + REDIS: 
"redis://cache:6379" + + # After first successful login, remove the admin email/password env. variables below + # as these will now be stored in the database. + ADMIN_EMAIL: "admin@example.com" + ADMIN_PASSWORD: "d1r3ctu5" +volumes: + directus_uploads: + directus_extensions: + directus_database: \ No newline at end of file diff --git a/blueprints/directus/index.ts b/blueprints/directus/index.ts new file mode 100644 index 000000000..37d03e7b2 --- /dev/null +++ b/blueprints/directus/index.ts @@ -0,0 +1,31 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const directusSecret = generateBase64(64); + const databasePassword = generatePassword(); + + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 8055, + serviceName: "directus", + }, + ]; + + const envs = [ + `DATABASE_PASSWORD=${databasePassword}`, + `DIRECTUS_SECRET=${directusSecret}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/discord-tickets/docker-compose.yml b/blueprints/discord-tickets/docker-compose.yml new file mode 100644 index 000000000..f797a77b0 --- /dev/null +++ b/blueprints/discord-tickets/docker-compose.yml @@ -0,0 +1,54 @@ +version: "3.8" + +services: + tickets-postgres: + image: mysql:8 + restart: unless-stopped + + volumes: + - tickets-mysql-data:/var/lib/mysql + environment: + MYSQL_DATABASE: ${MYSQL_DATABASE} + MYSQL_PASSWORD: ${MYSQL_PASSWORD} + MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD} + MYSQL_USER: ${MYSQL_USER} + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u${MYSQL_USER}", "-p${MYSQL_PASSWORD}"] + interval: 10s + timeout: 5s + retries: 5 + + tickets-app: + image: eartharoid/discord-tickets:4.0.21 + depends_on: + tickets-postgres: + condition: service_healthy + restart: unless-stopped + + volumes: + - tickets-app-data:/home/container/user + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + tty: true + stdin_open: true + environment: + DB_CONNECTION_URL: mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@tickets-postgres/${MYSQL_DATABASE} + DISCORD_SECRET: ${DISCORD_SECRET} + DISCORD_TOKEN: ${DISCORD_TOKEN} + ENCRYPTION_KEY: ${ENCRYPTION_KEY} + DB_PROVIDER: mysql + HTTP_EXTERNAL: https://${TICKETS_HOST} + HTTP_HOST: 0.0.0.0 + HTTP_PORT: 8169 + HTTP_TRUST_PROXY: "true" + PUBLIC_BOT: "false" + PUBLISH_COMMANDS: "true" + SUPER: ${SUPER_USERS} + +networks: + dokploy-network: + external: true + +volumes: + tickets-mysql-data: + tickets-app-data: \ No newline at end of file diff --git a/blueprints/discord-tickets/index.ts b/blueprints/discord-tickets/index.ts new file mode 100644 index 000000000..e11b78e66 --- /dev/null +++ b/blueprints/discord-tickets/index.ts @@ -0,0 +1,45 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const mysqlPassword = generatePassword(); + const mysqlRootPassword = generatePassword(); + const mysqlUser = "tickets"; + const mysqlDatabase = "tickets"; + + const encryptionKey = Array.from({ length: 48 }, () => + Math.floor(Math.random() * 16).toString(16), + ).join(""); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8169, + serviceName: "tickets-app", + }, + ]; + + const envs = [ + `TICKETS_HOST=${mainDomain}`, + 
`MYSQL_DATABASE=${mysqlDatabase}`, + `MYSQL_PASSWORD=${mysqlPassword}`, + `MYSQL_ROOT_PASSWORD=${mysqlRootPassword}`, + `MYSQL_USER=${mysqlUser}`, + `ENCRYPTION_KEY=${encryptionKey}`, + "# Follow the guide at: https://discordtickets.app/self-hosting/installation/docker/#creating-the-discord-application", + "DISCORD_SECRET=", + "DISCORD_TOKEN=", + "SUPER_USERS=YOUR_DISCORD_USER_ID", // Default super user + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/discourse/docker-compose.yml b/blueprints/discourse/docker-compose.yml new file mode 100644 index 000000000..2b938b855 --- /dev/null +++ b/blueprints/discourse/docker-compose.yml @@ -0,0 +1,90 @@ +version: '3.7' + +services: + discourse-db: + image: docker.io/bitnami/postgresql:17 + + volumes: + - discourse-postgresql-data:/bitnami/postgresql + environment: + POSTGRESQL_USERNAME: bn_discourse + POSTGRESQL_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRESQL_DATABASE: bitnami_discourse + healthcheck: + test: ["CMD-SHELL", "pg_isready -U bn_discourse -d bitnami_discourse"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + + discourse-redis: + image: docker.io/bitnami/redis:7.4 + + volumes: + - discourse-redis-data:/bitnami/redis + environment: + REDIS_PASSWORD: ${REDIS_PASSWORD} + healthcheck: + test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + + discourse-app: + image: docker.io/bitnami/discourse:3.3.2 + + volumes: + - discourse-data:/bitnami/discourse + depends_on: + discourse-db: + condition: service_healthy + discourse-redis: + condition: service_healthy + environment: + DISCOURSE_HOST: ${DISCOURSE_HOST} + DISCOURSE_DATABASE_HOST: discourse-db + DISCOURSE_DATABASE_PORT_NUMBER: 5432 + DISCOURSE_DATABASE_USER: bn_discourse + DISCOURSE_DATABASE_PASSWORD: ${POSTGRES_PASSWORD} + DISCOURSE_DATABASE_NAME: bitnami_discourse + DISCOURSE_REDIS_HOST: discourse-redis + DISCOURSE_REDIS_PORT_NUMBER: 6379 + DISCOURSE_REDIS_PASSWORD: ${REDIS_PASSWORD} + # Optional: Configure SMTP for email delivery + # DISCOURSE_SMTP_HOST: ${SMTP_HOST} + # DISCOURSE_SMTP_PORT: ${SMTP_PORT} + # DISCOURSE_SMTP_USER: ${SMTP_USER} + # DISCOURSE_SMTP_PASSWORD: ${SMTP_PASSWORD} + restart: unless-stopped + + discourse-sidekiq: + image: docker.io/bitnami/discourse:3.3.2 + + volumes: + - discourse-sidekiq-data:/bitnami/discourse + depends_on: + - discourse-app + command: /opt/bitnami/scripts/discourse-sidekiq/run.sh + environment: + DISCOURSE_HOST: ${DISCOURSE_HOST} + DISCOURSE_DATABASE_HOST: discourse-db + DISCOURSE_DATABASE_PORT_NUMBER: 5432 + DISCOURSE_DATABASE_USER: bn_discourse + DISCOURSE_DATABASE_PASSWORD: ${POSTGRES_PASSWORD} + DISCOURSE_DATABASE_NAME: bitnami_discourse + DISCOURSE_REDIS_HOST: discourse-redis + DISCOURSE_REDIS_PORT_NUMBER: 6379 + DISCOURSE_REDIS_PASSWORD: ${REDIS_PASSWORD} + # Optional: Configure SMTP for email delivery + # DISCOURSE_SMTP_HOST: ${SMTP_HOST} + # DISCOURSE_SMTP_PORT: ${SMTP_PORT} + # DISCOURSE_SMTP_USER: ${SMTP_USER} + # DISCOURSE_SMTP_PASSWORD: ${SMTP_PASSWORD} + restart: unless-stopped + +volumes: + discourse-postgresql-data: + discourse-redis-data: + discourse-data: + discourse-sidekiq-data: \ No newline at end of file diff --git a/blueprints/discourse/index.ts b/blueprints/discourse/index.ts new file mode 100644 index 000000000..bf99e8d05 --- /dev/null +++ b/blueprints/discourse/index.ts @@ -0,0 +1,37 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from 
"../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const postgresPassword = generatePassword(); + const redisPassword = generatePassword(); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "discourse-app", + }, + ]; + + const envs = [ + `DISCOURSE_HOST=${mainDomain}`, + `POSTGRES_PASSWORD=${postgresPassword}`, + `REDIS_PASSWORD=${redisPassword}`, + "# Optional: Configure SMTP for email delivery", + "# SMTP_HOST=smtp.example.com", + "# SMTP_PORT=587", + "# SMTP_USER=your_smtp_user", + "# SMTP_PASSWORD=your_smtp_password", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/docmost/docker-compose.yml b/blueprints/docmost/docker-compose.yml new file mode 100644 index 000000000..b5995594b --- /dev/null +++ b/blueprints/docmost/docker-compose.yml @@ -0,0 +1,44 @@ +version: "3" + +services: + docmost: + image: docmost/docmost:0.4.1 + depends_on: + - db + - redis + environment: + - APP_URL + - APP_SECRET + - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB}?schema=public + - REDIS_URL=redis://redis:6379 + restart: unless-stopped + + volumes: + - docmost:/app/data/storage + + db: + image: postgres:16-alpine + environment: + - POSTGRES_DB + - POSTGRES_USER + - POSTGRES_PASSWORD + restart: unless-stopped + + volumes: + - db_docmost_data:/var/lib/postgresql/data + + redis: + image: redis:7.2-alpine + restart: unless-stopped + + volumes: + - redis_docmost_data:/data + +networks: + dokploy-network: + external: true + +volumes: + docmost: + db_docmost_data: + redis_docmost_data: \ No newline at end of file diff --git a/blueprints/docmost/index.ts b/blueprints/docmost/index.ts new file mode 100644 index 000000000..16f7afa66 --- /dev/null +++ b/blueprints/docmost/index.ts @@ -0,0 +1,29 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 3000, + serviceName: "docmost", + }, + ]; + + const envs = [ + "POSTGRES_DB=docmost", + "POSTGRES_USER=docmost", + "POSTGRES_PASSWORD=STRONG_DB_PASSWORD", + "APP_URL=http://localhost:3000", + "APP_SECRET=VERY_STRONG_SECRET", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/documenso/docker-compose.yml b/blueprints/documenso/docker-compose.yml new file mode 100644 index 000000000..9b8e8ed87 --- /dev/null +++ b/blueprints/documenso/docker-compose.yml @@ -0,0 +1,42 @@ +version: "3.8" +services: + postgres: + image: postgres:16 + + volumes: + - documenso-data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=documenso + - POSTGRES_PASSWORD=password + - POSTGRES_DB=documenso + healthcheck: + test: ["CMD-SHELL", "pg_isready -U documenso"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + + documenso: + image: documenso/documenso:v1.5.6-rc.2 + depends_on: + postgres: + condition: service_healthy + environment: + - PORT=${DOCUMENSO_PORT} + - NEXTAUTH_URL=http://${DOCUMENSO_HOST} + - NEXTAUTH_SECRET=${NEXTAUTH_SECRET} + - NEXT_PRIVATE_ENCRYPTION_KEY=${NEXT_PRIVATE_ENCRYPTION_KEY} + - NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=${NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY} + - NEXT_PUBLIC_WEBAPP_URL=http://${DOCUMENSO_HOST} + - NEXT_PRIVATE_DATABASE_URL=postgres://documenso:password@postgres:5432/documenso + - 
NEXT_PRIVATE_DIRECT_DATABASE_URL=postgres://documenso:password@postgres:5432/documenso + - NEXT_PUBLIC_UPLOAD_TRANSPORT=database + - NEXT_PRIVATE_SMTP_TRANSPORT=smtp-auth + - NEXT_PRIVATE_SIGNING_LOCAL_FILE_PATH=/opt/documenso/cert.p12 + ports: + - ${DOCUMENSO_PORT} + volumes: + - /opt/documenso/cert.p12:/opt/documenso/cert.p12 + +volumes: + documenso-data: diff --git a/blueprints/documenso/index.ts b/blueprints/documenso/index.ts new file mode 100644 index 000000000..c70d1db4a --- /dev/null +++ b/blueprints/documenso/index.ts @@ -0,0 +1,36 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const nextAuthSecret = generateBase64(32); + const documensoEncryptionKey = generatePassword(32); + const documensoSecondaryEncryptionKey = generatePassword(64); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "documenso", + }, + ]; + + const envs = [ + `DOCUMENSO_HOST=${mainDomain}`, + "DOCUMENSO_PORT=3000", + `NEXTAUTH_SECRET=${nextAuthSecret}`, + `NEXT_PRIVATE_ENCRYPTION_KEY=${documensoEncryptionKey}`, + `NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=${documensoSecondaryEncryptionKey}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/doublezero/docker-compose.yml b/blueprints/doublezero/docker-compose.yml new file mode 100644 index 000000000..352470334 --- /dev/null +++ b/blueprints/doublezero/docker-compose.yml @@ -0,0 +1,19 @@ +services: + doublezero: + restart: always + image: liltechnomancer/double-zero:0.2.1 + volumes: + - db-data:/var/lib/doublezero/data + environment: + AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID} + AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY} + AWS_REGION: ${AWS_REGION} + SQS_URL: ${SQS_URL} + SYSTEM_EMAIL: ${SYSTEM_EMAIL} + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + PHX_HOST: ${DOUBLEZERO_HOST} + DATABASE_PATH: ./00.db + +volumes: + db-data: + driver: local diff --git a/blueprints/doublezero/index.ts b/blueprints/doublezero/index.ts new file mode 100644 index 000000000..fa774e9dc --- /dev/null +++ b/blueprints/doublezero/index.ts @@ -0,0 +1,36 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const secretKeyBase = generateBase64(64); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 4000, + serviceName: "doublezero", + }, + ]; + + const envs = [ + `DOUBLEZERO_HOST=${mainDomain}`, + "DOUBLEZERO_PORT=4000", + `SECRET_KEY_BASE=${secretKeyBase}`, + "AWS_ACCESS_KEY_ID=your-aws-access-key", + "AWS_SECRET_ACCESS_KEY=your-aws-secret-key", + "AWS_REGION=your-aws-region", + "SQS_URL=your-aws-sqs-url", + "SYSTEM_EMAIL=", + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/drawio/docker-compose.yml b/blueprints/drawio/docker-compose.yml new file mode 100644 index 000000000..a7d7b578d --- /dev/null +++ b/blueprints/drawio/docker-compose.yml @@ -0,0 +1,59 @@ +version: '3' +services: + plantuml-server: + image: plantuml/plantuml-server + ports: + - "8080" + + volumes: + - fonts_volume:/usr/share/fonts/drawio + image-export: + image: jgraph/export-server + ports: + - "8000" + + volumes: + - fonts_volume:/usr/share/fonts/drawio + environment: + - DRAWIO_BASE_URL=${DRAWIO_BASE_URL} + drawio: + image: jgraph/drawio:24.7.17 
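+    # draw.io delegates PlantUML rendering and image export to the
+    # plantuml-server and image-export sidecars defined above.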
+    ports:
+      - "8080"
+    links:
+      - plantuml-server:plantuml-server
+      - image-export:image-export
+    depends_on:
+      - plantuml-server
+      - image-export
+
+    environment:
+      DRAWIO_SELF_CONTAINED: 1
+      DRAWIO_USE_HTTP: 1
+      PLANTUML_URL: http://plantuml-server:8080/
+      EXPORT_URL: http://image-export:8000/
+      DRAWIO_BASE_URL: ${DRAWIO_BASE_URL}
+      DRAWIO_SERVER_URL: ${DRAWIO_SERVER_URL}
+      DRAWIO_CSP_HEADER: ${DRAWIO_CSP_HEADER}
+      DRAWIO_VIEWER_URL: ${DRAWIO_VIEWER_URL}
+      DRAWIO_LIGHTBOX_URL: ${DRAWIO_LIGHTBOX_URL}
+      DRAWIO_CONFIG: ${DRAWIO_CONFIG}
+      DRAWIO_GOOGLE_CLIENT_ID: ${DRAWIO_GOOGLE_CLIENT_ID}
+      DRAWIO_GOOGLE_APP_ID: ${DRAWIO_GOOGLE_APP_ID}
+      DRAWIO_GOOGLE_CLIENT_SECRET: ${DRAWIO_GOOGLE_CLIENT_SECRET}
+      DRAWIO_GOOGLE_VIEWER_CLIENT_ID: ${DRAWIO_GOOGLE_VIEWER_CLIENT_ID}
+      DRAWIO_GOOGLE_VIEWER_APP_ID: ${DRAWIO_GOOGLE_VIEWER_APP_ID}
+      DRAWIO_GOOGLE_VIEWER_CLIENT_SECRET: ${DRAWIO_GOOGLE_VIEWER_CLIENT_SECRET}
+      DRAWIO_MSGRAPH_CLIENT_ID: ${DRAWIO_MSGRAPH_CLIENT_ID}
+      DRAWIO_MSGRAPH_CLIENT_SECRET: ${DRAWIO_MSGRAPH_CLIENT_SECRET}
+      DRAWIO_MSGRAPH_TENANT_ID: ${DRAWIO_MSGRAPH_TENANT_ID}
+      DRAWIO_GITLAB_ID: ${DRAWIO_GITLAB_ID}
+      DRAWIO_GITLAB_SECRET: ${DRAWIO_GITLAB_SECRET}
+      DRAWIO_GITLAB_URL: ${DRAWIO_GITLAB_URL}
+      DRAWIO_CLOUD_CONVERT_APIKEY: ${DRAWIO_CLOUD_CONVERT_APIKEY}
+networks:
+  dokploy-network:
+    external: true
+
+volumes:
+  fonts_volume:
\ No newline at end of file
diff --git a/blueprints/drawio/index.ts b/blueprints/drawio/index.ts
new file mode 100644
index 000000000..701283c8d
--- /dev/null
+++ b/blueprints/drawio/index.ts
@@ -0,0 +1,31 @@
+import {
+  type DomainSchema,
+  type Schema,
+  type Template,
+  generateBase64,
+  generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+  const mainDomain = generateRandomDomain(schema);
+  const _secretKeyBase = generateBase64(64);
+
+  const domains: DomainSchema[] = [
+    {
+      host: mainDomain,
+      port: 8080,
+      serviceName: "drawio",
+    },
+  ];
+
+  const envs = [
+    `DRAWIO_HOST=${mainDomain}`,
+    `DRAWIO_BASE_URL=https://${mainDomain}`,
+    `DRAWIO_SERVER_URL=https://${mainDomain}/`,
+  ];
+
+  return {
+    envs,
+    domains,
+  };
+}
diff --git a/blueprints/elastic-search/docker-compose.yml b/blueprints/elastic-search/docker-compose.yml
new file mode 100644
index 000000000..929006ff1
--- /dev/null
+++ b/blueprints/elastic-search/docker-compose.yml
@@ -0,0 +1,34 @@
+version: '3.8'
+
+services:
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.10.2
+    container_name: elasticsearch
+    environment:
+      - discovery.type=single-node
+      - xpack.security.enabled=false
+      - bootstrap.memory_lock=true
+      - ES_JAVA_OPTS=-Xms512m -Xmx512m
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    ports:
+      - "9200"
+    volumes:
+      - es_data:/usr/share/elasticsearch/data
+
+  kibana:
+    image: docker.elastic.co/kibana/kibana:8.10.2
+    container_name: kibana
+    environment:
+      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
+    ports:
+      - "5601"
+    depends_on:
+      - elasticsearch
+
+volumes:
+  es_data:
+    driver: local
+
\ No newline at end of file
diff --git a/blueprints/elastic-search/index.ts b/blueprints/elastic-search/index.ts
new file mode 100644
index 000000000..5a3a31e1f
--- /dev/null
+++ b/blueprints/elastic-search/index.ts
@@ -0,0 +1,28 @@
+import {
+  type DomainSchema,
+  type Schema,
+  type Template,
+  generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+  const mainDomain = generateRandomDomain(schema);
+  const apiDomain = generateRandomDomain(schema);
+
+  const domains: DomainSchema[] = [
+    {
host: mainDomain, + port: 5601, + serviceName: "kibana", + }, + { + host: apiDomain, + port: 9200, + serviceName: "elasticsearch", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/erpnext/docker-compose.yml b/blueprints/erpnext/docker-compose.yml new file mode 100644 index 000000000..28cd8f6ab --- /dev/null +++ b/blueprints/erpnext/docker-compose.yml @@ -0,0 +1,354 @@ +x-custom-image: &custom_image + image: ${IMAGE_NAME:-docker.io/frappe/erpnext}:${VERSION:-version-15} + pull_policy: ${PULL_POLICY:-always} + deploy: + restart_policy: + condition: always + +services: + backend: + <<: *custom_image + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:8000' + interval: 2s + timeout: 10s + retries: 30 + + frontend: + <<: *custom_image + command: + - nginx-entrypoint.sh + depends_on: + backend: + condition: service_started + required: true + websocket: + condition: service_started + required: true + environment: + BACKEND: backend:8000 + FRAPPE_SITE_NAME_HEADER: ${FRAPPE_SITE_NAME_HEADER:-$$host} + SOCKETIO: websocket:9000 + UPSTREAM_REAL_IP_ADDRESS: 127.0.0.1 + UPSTREAM_REAL_IP_HEADER: X-Forwarded-For + UPSTREAM_REAL_IP_RECURSIVE: "off" + volumes: + - sites:/home/frappe/frappe-bench/sites + + networks: + - bench-network + + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:8080' + interval: 2s + timeout: 30s + retries: 30 + + queue-default: + <<: *custom_image + command: + - bench + - worker + - --queue + - default + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + queue-long: + <<: *custom_image + command: + - bench + - worker + - --queue + - long + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + queue-short: + <<: *custom_image + command: + - bench + - worker + - --queue + - short + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + scheduler: + <<: *custom_image + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + command: + - bench + - schedule + depends_on: + configurator: + condition: service_completed_successfully + required: true + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + websocket: + <<: *custom_image + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:9000' + interval: 2s + timeout: 10s + retries: 30 + command: + - node + - /home/frappe/frappe-bench/apps/frappe/socketio.js + depends_on: + configurator: + condition: service_completed_successfully + required: true + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + configurator: + <<: *custom_image + deploy: + mode: replicated + replicas: ${CONFIGURE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + [[ $${REGENERATE_APPS_TXT} == 
"1" ]] && ls -1 apps > sites/apps.txt; + [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".db_host // empty"` ]] && exit 0; + bench set-config -g db_host $$DB_HOST; + bench set-config -gp db_port $$DB_PORT; + bench set-config -g redis_cache "redis://$$REDIS_CACHE"; + bench set-config -g redis_queue "redis://$$REDIS_QUEUE"; + bench set-config -g redis_socketio "redis://$$REDIS_QUEUE"; + bench set-config -gp socketio_port $$SOCKETIO_PORT; + environment: + DB_HOST: "${DB_HOST:-db}" + DB_PORT: "3306" + REDIS_CACHE: redis-cache:6379 + REDIS_QUEUE: redis-queue:6379 + SOCKETIO_PORT: "9000" + REGENERATE_APPS_TXT: "${REGENERATE_APPS_TXT:-0}" + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + create-site: + <<: *custom_image + deploy: + mode: replicated + replicas: ${CREATE_SITE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + wait-for-it -t 120 $$DB_HOST:$$DB_PORT; + wait-for-it -t 120 redis-cache:6379; + wait-for-it -t 120 redis-queue:6379; + export start=`date +%s`; + until [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".db_host // empty"` ]] && \ + [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".redis_cache // empty"` ]] && \ + [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".redis_queue // empty"` ]]; + do + echo "Waiting for sites/common_site_config.json to be created"; + sleep 5; + if (( `date +%s`-start > 120 )); then + echo "could not find sites/common_site_config.json with required keys"; + exit 1 + fi + done; + echo "sites/common_site_config.json found"; + [[ -d "sites/${SITE_NAME}" ]] && echo "${SITE_NAME} already exists" && exit 0; + bench new-site --mariadb-user-host-login-scope='%' --admin-password=$${ADMIN_PASSWORD} --db-root-username=root --db-root-password=$${DB_ROOT_PASSWORD} $${INSTALL_APP_ARGS} $${SITE_NAME}; + volumes: + - sites:/home/frappe/frappe-bench/sites + environment: + SITE_NAME: ${SITE_NAME} + ADMIN_PASSWORD: ${ADMIN_PASSWORD} + DB_HOST: ${DB_HOST:-db} + DB_PORT: "${DB_PORT:-3306}" + DB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD} + INSTALL_APP_ARGS: ${INSTALL_APP_ARGS} + networks: + - bench-network + + migration: + <<: *custom_image + deploy: + mode: replicated + replicas: ${MIGRATE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + curl -f http://${SITE_NAME}:8080/api/method/ping || echo "Site busy" && exit 0; + bench --site all set-config -p maintenance_mode 1; + bench --site all set-config -p pause_scheduler 1; + bench --site all migrate; + bench --site all set-config -p maintenance_mode 0; + bench --site all set-config -p pause_scheduler 0; + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + db: + image: mariadb:10.6 + deploy: + mode: replicated + replicas: ${ENABLE_DB:-0} + restart_policy: + condition: always + healthcheck: + test: mysqladmin ping -h localhost --password=${DB_ROOT_PASSWORD} + interval: 1s + retries: 20 + command: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + - --skip-character-set-client-handshake + - --skip-innodb-read-only-compressed + environment: + - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD} + - MARIADB_ROOT_PASSWORD=${DB_ROOT_PASSWORD} + volumes: + - db-data:/var/lib/mysql + networks: + - bench-network + + redis-cache: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-cache-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s 
+ retries: 3 + + redis-queue: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-queue-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s + retries: 3 + + redis-socketio: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-socketio-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s + retries: 3 + +volumes: + db-data: + redis-cache-data: + redis-queue-data: + redis-socketio-data: + sites: + driver_opts: + type: "${SITE_VOLUME_TYPE}" + o: "${SITE_VOLUME_OPTS}" + device: "${SITE_VOLUME_DEV}" + +networks: + bench-network: \ No newline at end of file diff --git a/blueprints/erpnext/index.ts b/blueprints/erpnext/index.ts new file mode 100644 index 000000000..5b7543b91 --- /dev/null +++ b/blueprints/erpnext/index.ts @@ -0,0 +1,39 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const dbRootPassword = generatePassword(32); + const adminPassword = generatePassword(32); + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "frontend", + }, + ]; + + const envs = [ + `SITE_NAME=${mainDomain}`, + `ADMIN_PASSWORD=${adminPassword}`, + `DB_ROOT_PASSWORD=${dbRootPassword}`, + "MIGRATE=1", + "ENABLE_DB=1", + "DB_HOST=db", + "CREATE_SITE=1", + "CONFIGURE=1", + "REGENERATE_APPS_TXT=1", + "INSTALL_APP_ARGS=--install-app erpnext", + "IMAGE_NAME=docker.io/frappe/erpnext", + "VERSION=version-15", + "FRAPPE_SITE_NAME_HEADER=", + ]; + + return { envs, domains }; +} diff --git a/blueprints/evolutionapi/docker-compose.yml b/blueprints/evolutionapi/docker-compose.yml new file mode 100644 index 000000000..d4803de1c --- /dev/null +++ b/blueprints/evolutionapi/docker-compose.yml @@ -0,0 +1,58 @@ +services: + evolution-api: + image: atendai/evolution-api:v2.1.2 + restart: always + volumes: + - evolution-instances:/evolution/instances + + environment: + - SERVER_URL=${SERVER_URL} + - AUTHENTICATION_TYPE=${AUTHENTICATION_TYPE} + - AUTHENTICATION_API_KEY=${AUTHENTICATION_API_KEY} + - AUTHENTICATION_EXPOSE_IN_FETCH_INSTANCES=${AUTHENTICATION_EXPOSE_IN_FETCH_INSTANCES} + - LANGUAGE=${LANGUAGE} + - CONFIG_SESSION_PHONE_CLIENT=${CONFIG_SESSION_PHONE_CLIENT} + - CONFIG_SESSION_PHONE_NAME=${CONFIG_SESSION_PHONE_NAME} + - TELEMETRY=${TELEMETRY} + - TELEMETRY_URL=${TELEMETRY_URL} + - DATABASE_ENABLED=${DATABASE_ENABLED} + - DATABASE_PROVIDER=${DATABASE_PROVIDER} + - DATABASE_CONNECTION_URI=${DATABASE_CONNECTION_URI} + - DATABASE_SAVE_DATA_INSTANCE=${DATABASE_SAVE_DATA_INSTANCE} + - DATABASE_SAVE_DATA_NEW_MESSAGE=${DATABASE_SAVE_DATA_NEW_MESSAGE} + - DATABASE_SAVE_MESSAGE_UPDATE=${DATABASE_SAVE_MESSAGE_UPDATE} + - DATABASE_SAVE_DATA_CONTACTS=${DATABASE_SAVE_DATA_CONTACTS} + - DATABASE_SAVE_DATA_CHATS=${DATABASE_SAVE_DATA_CHATS} + - DATABASE_SAVE_DATA_LABELS=${DATABASE_SAVE_DATA_LABELS} + - DATABASE_SAVE_DATA_HISTORIC=${DATABASE_SAVE_DATA_HISTORIC} + - CACHE_REDIS_ENABLED=${CACHE_REDIS_ENABLED} + - CACHE_REDIS_URI=${CACHE_REDIS_URI} + - CACHE_REDIS_PREFIX_KEY=${CACHE_REDIS_PREFIX_KEY} + - CACHE_REDIS_SAVE_INSTANCES=${CACHE_REDIS_SAVE_INSTANCES} + + evolution-postgres: + image: postgres:16-alpine + restart: always + volumes: + - evolution-postgres-data:/var/lib/postgresql/data + + 
environment: + - POSTGRES_DB=${POSTGRES_DATABASE} + - POSTGRES_USER=${POSTGRES_USERNAME} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + + evolution-redis: + image: redis:alpine + restart: always + volumes: + - evolution-redis-data:/data + + +networks: + dokploy-network: + external: true + +volumes: + evolution-instances: + evolution-postgres-data: + evolution-redis-data: \ No newline at end of file diff --git a/blueprints/evolutionapi/index.ts b/blueprints/evolutionapi/index.ts new file mode 100644 index 000000000..6ca7a3b60 --- /dev/null +++ b/blueprints/evolutionapi/index.ts @@ -0,0 +1,59 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const apiKey = generateBase64(64); + const postgresPassword = generatePassword(); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "evolution-api", + }, + ]; + + const envs = [ + `SERVER_URL=https://${mainDomain}`, + "AUTHENTICATION_TYPE=apikey", + `AUTHENTICATION_API_KEY=${apiKey}`, + "AUTHENTICATION_EXPOSE_IN_FETCH_INSTANCES=true", + + "LANGUAGE=en", + "CONFIG_SESSION_PHONE_CLIENT=Evolution API", + "CONFIG_SESSION_PHONE_NAME=Chrome", + "TELEMETRY=false", + "TELEMETRY_URL=", + + "POSTGRES_DATABASE=evolution", + "POSTGRES_USERNAME=postgresql", + `POSTGRES_PASSWORD=${postgresPassword}`, + "DATABASE_ENABLED=true", + "DATABASE_PROVIDER=postgresql", + `DATABASE_CONNECTION_URI=postgres://postgresql:${postgresPassword}@evolution-postgres:5432/evolution`, + "DATABASE_SAVE_DATA_INSTANCE=true", + "DATABASE_SAVE_DATA_NEW_MESSAGE=true", + "DATABASE_SAVE_MESSAGE_UPDATE=true", + "DATABASE_SAVE_DATA_CONTACTS=true", + "DATABASE_SAVE_DATA_CHATS=true", + "DATABASE_SAVE_DATA_LABELS=true", + "DATABASE_SAVE_DATA_HISTORIC=true", + + "CACHE_REDIS_ENABLED=true", + "CACHE_REDIS_URI=redis://evolution-redis:6379", + "CACHE_REDIS_PREFIX_KEY=evolution", + "CACHE_REDIS_SAVE_INSTANCES=true", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/excalidraw/docker-compose.yml b/blueprints/excalidraw/docker-compose.yml new file mode 100644 index 000000000..3cf2fb1d1 --- /dev/null +++ b/blueprints/excalidraw/docker-compose.yml @@ -0,0 +1,6 @@ +version: "3.8" + +services: + excalidraw: + + image: excalidraw/excalidraw:latest diff --git a/blueprints/excalidraw/index.ts b/blueprints/excalidraw/index.ts new file mode 100644 index 000000000..7f73f395f --- /dev/null +++ b/blueprints/excalidraw/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "excalidraw", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/filebrowser/docker-compose.yml b/blueprints/filebrowser/docker-compose.yml new file mode 100644 index 000000000..10c119091 --- /dev/null +++ b/blueprints/filebrowser/docker-compose.yml @@ -0,0 +1,14 @@ +services: + filebrowser: + image: hurlenko/filebrowser + volumes: + - filebrowser-data:/data + - filebrowser-config:/config + environment: + - FB_BASEURL=${FB_BASEURL} + restart: always + +volumes: + filebrowser-data: + filebrowser-config: + \ No newline at end of file diff --git a/blueprints/filebrowser/index.ts b/blueprints/filebrowser/index.ts 
new file mode 100644 index 000000000..c30519f1d --- /dev/null +++ b/blueprints/filebrowser/index.ts @@ -0,0 +1,24 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "filebrowser", + }, + ]; + const envs = ["FB_BASEURL=/filebrowser"]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/formbricks/docker-compose.yml b/blueprints/formbricks/docker-compose.yml new file mode 100644 index 000000000..ad1dcbcff --- /dev/null +++ b/blueprints/formbricks/docker-compose.yml @@ -0,0 +1,37 @@ +x-environment: &environment + environment: + WEBAPP_URL: ${WEBAPP_URL} + NEXTAUTH_URL: ${NEXTAUTH_URL} + DATABASE_URL: "postgresql://postgres:postgres@postgres:5432/formbricks?schema=public" + NEXTAUTH_SECRET: ${NEXTAUTH_SECRET} + ENCRYPTION_KEY: ${ENCRYPTION_KEY} + CRON_SECRET: ${CRON_SECRET} + EMAIL_VERIFICATION_DISABLED: 1 + PASSWORD_RESET_DISABLED: 1 + S3_FORCE_PATH_STYLE: 0 + +services: + postgres: + restart: always + image: pgvector/pgvector:pg17 + volumes: + - postgres:/var/lib/postgresql/data + environment: + - POSTGRES_PASSWORD=postgres + + + formbricks: + restart: always + image: ghcr.io/formbricks/formbricks:v3.1.3 + depends_on: + - postgres + ports: + - 3000 + volumes: + - ../files/uploads:/home/nextjs/apps/web/uploads/ + <<: *environment + +volumes: + postgres: + driver: local + uploads: diff --git a/blueprints/formbricks/index.ts b/blueprints/formbricks/index.ts new file mode 100644 index 000000000..fc179f497 --- /dev/null +++ b/blueprints/formbricks/index.ts @@ -0,0 +1,38 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const secretBase = generateBase64(64); + const encryptionKey = generateBase64(48); + const cronSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "formbricks", + }, + ]; + + const envs = [ + `WEBAPP_URL=http://${mainDomain}`, + `NEXTAUTH_URL=http://${mainDomain}`, + `NEXTAUTH_SECRET=${secretBase}`, + `ENCRYPTION_KEY=${encryptionKey}`, + `CRON_SECRET=${cronSecret}`, + ]; + + const mounts: Template["mounts"] = []; + + return { + envs, + mounts, + domains, + }; +} diff --git a/blueprints/frappe-hr/docker-compose.yml b/blueprints/frappe-hr/docker-compose.yml new file mode 100644 index 000000000..a7ce9b262 --- /dev/null +++ b/blueprints/frappe-hr/docker-compose.yml @@ -0,0 +1,354 @@ +x-custom-image: &custom_image + image: ${IMAGE_NAME:-ghcr.io/frappe/hrms}:${VERSION:-version-15} + pull_policy: ${PULL_POLICY:-always} + deploy: + restart_policy: + condition: always + +services: + backend: + <<: *custom_image + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:8000' + interval: 2s + timeout: 10s + retries: 30 + + frontend: + <<: *custom_image + command: + - nginx-entrypoint.sh + depends_on: + backend: + condition: service_started + required: true + websocket: + condition: service_started + required: true + environment: + BACKEND: backend:8000 + FRAPPE_SITE_NAME_HEADER: ${FRAPPE_SITE_NAME_HEADER:-$$host} + SOCKETIO: websocket:9000 + UPSTREAM_REAL_IP_ADDRESS: 127.0.0.1 + 
UPSTREAM_REAL_IP_HEADER: X-Forwarded-For + UPSTREAM_REAL_IP_RECURSIVE: "off" + volumes: + - sites:/home/frappe/frappe-bench/sites + + networks: + - bench-network + + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:8080' + interval: 2s + timeout: 30s + retries: 30 + + queue-default: + <<: *custom_image + command: + - bench + - worker + - --queue + - default + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + queue-long: + <<: *custom_image + command: + - bench + - worker + - --queue + - long + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + queue-short: + <<: *custom_image + command: + - bench + - worker + - --queue + - short + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + depends_on: + configurator: + condition: service_completed_successfully + required: true + + scheduler: + <<: *custom_image + healthcheck: + test: + - CMD + - wait-for-it + - 'redis-queue:6379' + interval: 2s + timeout: 10s + retries: 30 + command: + - bench + - schedule + depends_on: + configurator: + condition: service_completed_successfully + required: true + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + websocket: + <<: *custom_image + healthcheck: + test: + - CMD + - wait-for-it + - '0.0.0.0:9000' + interval: 2s + timeout: 10s + retries: 30 + command: + - node + - /home/frappe/frappe-bench/apps/frappe/socketio.js + depends_on: + configurator: + condition: service_completed_successfully + required: true + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + configurator: + <<: *custom_image + deploy: + mode: replicated + replicas: ${CONFIGURE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + [[ $${REGENERATE_APPS_TXT} == "1" ]] && ls -1 apps > sites/apps.txt; + [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".db_host // empty"` ]] && exit 0; + bench set-config -g db_host $$DB_HOST; + bench set-config -gp db_port $$DB_PORT; + bench set-config -g redis_cache "redis://$$REDIS_CACHE"; + bench set-config -g redis_queue "redis://$$REDIS_QUEUE"; + bench set-config -g redis_socketio "redis://$$REDIS_QUEUE"; + bench set-config -gp socketio_port $$SOCKETIO_PORT; + environment: + DB_HOST: "${DB_HOST:-db}" + DB_PORT: "3306" + REDIS_CACHE: redis-cache:6379 + REDIS_QUEUE: redis-queue:6379 + SOCKETIO_PORT: "9000" + REGENERATE_APPS_TXT: "${REGENERATE_APPS_TXT:-0}" + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + create-site: + <<: *custom_image + deploy: + mode: replicated + replicas: ${CREATE_SITE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + wait-for-it -t 120 $$DB_HOST:$$DB_PORT; + wait-for-it -t 120 redis-cache:6379; + wait-for-it -t 120 redis-queue:6379; + export start=`date +%s`; + until [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".db_host // empty"` ]] && \ + [[ -n `grep -hs ^ 
sites/common_site_config.json | jq -r ".redis_cache // empty"` ]] && \ + [[ -n `grep -hs ^ sites/common_site_config.json | jq -r ".redis_queue // empty"` ]]; + do + echo "Waiting for sites/common_site_config.json to be created"; + sleep 5; + if (( `date +%s`-start > 120 )); then + echo "could not find sites/common_site_config.json with required keys"; + exit 1 + fi + done; + echo "sites/common_site_config.json found"; + [[ -d "sites/${SITE_NAME}" ]] && echo "${SITE_NAME} already exists" && exit 0; + bench new-site --mariadb-user-host-login-scope='%' --admin-password=$${ADMIN_PASSWORD} --db-root-username=root --db-root-password=$${DB_ROOT_PASSWORD} $${INSTALL_APP_ARGS} $${SITE_NAME}; + volumes: + - sites:/home/frappe/frappe-bench/sites + environment: + SITE_NAME: ${SITE_NAME} + ADMIN_PASSWORD: ${ADMIN_PASSWORD} + DB_HOST: ${DB_HOST:-db} + DB_PORT: "${DB_PORT:-3306}" + DB_ROOT_PASSWORD: ${DB_ROOT_PASSWORD} + INSTALL_APP_ARGS: ${INSTALL_APP_ARGS} + networks: + - bench-network + + migration: + <<: *custom_image + deploy: + mode: replicated + replicas: ${MIGRATE:-0} + restart_policy: + condition: none + entrypoint: ["bash", "-c"] + command: + - > + curl -f http://${SITE_NAME}:8080/api/method/ping || echo "Site busy" && exit 0; + bench --site all set-config -p maintenance_mode 1; + bench --site all set-config -p pause_scheduler 1; + bench --site all migrate; + bench --site all set-config -p maintenance_mode 0; + bench --site all set-config -p pause_scheduler 0; + volumes: + - sites:/home/frappe/frappe-bench/sites + networks: + - bench-network + + db: + image: mariadb:10.6 + deploy: + mode: replicated + replicas: ${ENABLE_DB:-0} + restart_policy: + condition: always + healthcheck: + test: mysqladmin ping -h localhost --password=${DB_ROOT_PASSWORD} + interval: 1s + retries: 20 + command: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + - --skip-character-set-client-handshake + - --skip-innodb-read-only-compressed + environment: + - MYSQL_ROOT_PASSWORD=${DB_ROOT_PASSWORD} + - MARIADB_ROOT_PASSWORD=${DB_ROOT_PASSWORD} + volumes: + - db-data:/var/lib/mysql + networks: + - bench-network + + redis-cache: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-cache-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s + retries: 3 + + redis-queue: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-queue-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s + retries: 3 + + redis-socketio: + deploy: + restart_policy: + condition: always + image: redis:6.2-alpine + volumes: + - redis-socketio-data:/data + networks: + - bench-network + healthcheck: + test: + - CMD + - redis-cli + - ping + interval: 5s + timeout: 5s + retries: 3 + +volumes: + db-data: + redis-cache-data: + redis-queue-data: + redis-socketio-data: + sites: + driver_opts: + type: "${SITE_VOLUME_TYPE}" + o: "${SITE_VOLUME_OPTS}" + device: "${SITE_VOLUME_DEV}" + +networks: + bench-network: \ No newline at end of file diff --git a/blueprints/frappe-hr/index.ts b/blueprints/frappe-hr/index.ts new file mode 100644 index 000000000..1e6b94745 --- /dev/null +++ b/blueprints/frappe-hr/index.ts @@ -0,0 +1,39 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const 
dbRootPassword = generatePassword(32); + const adminPassword = generatePassword(32); + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "frontend", + }, + ]; + + const envs = [ + `SITE_NAME=${mainDomain}`, + `ADMIN_PASSWORD=${adminPassword}`, + `DB_ROOT_PASSWORD=${dbRootPassword}`, + "MIGRATE=1", + "ENABLE_DB=1", + "DB_HOST=db", + "CREATE_SITE=1", + "CONFIGURE=1", + "REGENERATE_APPS_TXT=1", + "INSTALL_APP_ARGS=--install-app hrms", + "IMAGE_NAME=ghcr.io/frappe/hrms", + "VERSION=version-15", + "FRAPPE_SITE_NAME_HEADER=", + ]; + + return { envs, domains }; +} diff --git a/blueprints/ghost/docker-compose.yml b/blueprints/ghost/docker-compose.yml new file mode 100644 index 000000000..33c47f7f8 --- /dev/null +++ b/blueprints/ghost/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" +services: + ghost: + image: ghost:5-alpine + restart: always + environment: + database__client: mysql + database__connection__host: db + database__connection__user: root + database__connection__password: example + database__connection__database: ghost + url: http://${GHOST_HOST} + + volumes: + - ghost:/var/lib/ghost/content + + db: + image: mysql:8.0 + restart: always + + environment: + MYSQL_ROOT_PASSWORD: example + volumes: + - db:/var/lib/mysql + +volumes: + ghost: + db: diff --git a/blueprints/ghost/index.ts b/blueprints/ghost/index.ts new file mode 100644 index 000000000..052b7c6bb --- /dev/null +++ b/blueprints/ghost/index.ts @@ -0,0 +1,24 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 2368, + serviceName: "ghost", + }, + ]; + const envs = [`GHOST_HOST=${mainDomain}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/gitea/docker-compose.yml b/blueprints/gitea/docker-compose.yml new file mode 100644 index 000000000..5127224cd --- /dev/null +++ b/blueprints/gitea/docker-compose.yml @@ -0,0 +1,35 @@ +version: "3.8" +services: + gitea: + image: gitea/gitea:1.22.3 + environment: + - USER_UID=${USER_UID} + - USER_GID=${USER_GID} + - GITEA__database__DB_TYPE=postgres + - GITEA__database__HOST=db:5432 + - GITEA__database__NAME=gitea + - GITEA__database__USER=gitea + - GITEA__database__PASSWD=gitea + restart: always + + volumes: + - gitea_server:/data + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + depends_on: + - db + + db: + image: postgres:17 + restart: always + environment: + - POSTGRES_USER=gitea + - POSTGRES_PASSWORD=gitea + - POSTGRES_DB=gitea + + volumes: + - gitea_db:/var/lib/postgresql/data + +volumes: + gitea_db: + gitea_server: \ No newline at end of file diff --git a/blueprints/gitea/index.ts b/blueprints/gitea/index.ts new file mode 100644 index 000000000..92a20df64 --- /dev/null +++ b/blueprints/gitea/index.ts @@ -0,0 +1,24 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "gitea", + }, + ]; + const envs = ["USER_UID=1000", "USER_GID=1000"]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/glance/docker-compose.yml b/blueprints/glance/docker-compose.yml new file mode 
100644 index 000000000..ace8bc940 --- /dev/null +++ b/blueprints/glance/docker-compose.yml @@ -0,0 +1,11 @@ +services: + glance: + image: glanceapp/glance + volumes: + - ../files/app/config/:/app/config + - ../files/app/assets:/app/assets + # Optionally, also mount docker socket if you want to use the docker containers widget + # - /var/run/docker.sock:/var/run/docker.sock:ro + ports: + - 8080 + env_file: .env \ No newline at end of file diff --git a/blueprints/glance/index.ts b/blueprints/glance/index.ts new file mode 100644 index 000000000..a0ab1b676 --- /dev/null +++ b/blueprints/glance/index.ts @@ -0,0 +1,108 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "glance", + }, + ]; + + const mounts: Template["mounts"] = [ + { + filePath: "/app/config/glance.yml", + content: ` +branding: + hide-footer: true + logo-text: P + +pages: + - name: Home + columns: + - size: small + widgets: + - type: calendar + + - type: releases + show-source-icon: true + repositories: + - Dokploy/dokploy + - n8n-io/n8n + - Budibase/budibase + - home-assistant/core + - tidbyt/pixlet + + - type: twitch-channels + channels: + - nmplol + - extraemily + - qtcinderella + - ludwig + - timthetatman + - mizkif + + - size: full + widgets: + - type: hacker-news + + - type: videos + style: grid-cards + channels: + - UC3GzdWYwUYI1ACxuP9Nm-eg + - UCGbg3DjQdcqWwqOLHpYHXIg + - UC24RSoLcjiNZbQcT54j5l7Q + limit: 3 + + - type: rss + limit: 10 + collapse-after: 3 + cache: 3h + feeds: + - url: https://daringfireball.net/feeds/main + title: Daring Fireball + + - size: small + widgets: + - type: weather + location: Gansevoort, New York, United States + show-area-name: false + units: imperial + hour-format: 12h + + - type: markets + markets: + - symbol: SPY + name: S&P 500 + - symbol: VOO + name: Vanguard + - symbol: BTC-USD + name: Bitcoin + - symbol: ETH-USD + name: Ethereum + - symbol: NVDA + name: NVIDIA + - symbol: AAPL + name: Apple + - symbol: MSFT + name: Microsoft + - symbol: GOOGL + name: Google + - symbol: AMD + name: AMD + - symbol: TSLA + name: Tesla`, + }, + ]; + + return { + domains, + mounts, + }; +} diff --git a/blueprints/glitchtip/docker-compose.yml b/blueprints/glitchtip/docker-compose.yml new file mode 100644 index 000000000..f47742f01 --- /dev/null +++ b/blueprints/glitchtip/docker-compose.yml @@ -0,0 +1,55 @@ +x-environment: &default-environment + DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres + SECRET_KEY: ${SECRET_KEY} + PORT: ${GLITCHTIP_PORT} + EMAIL_URL: consolemail:// + GLITCHTIP_DOMAIN: http://${GLITCHTIP_HOST} + DEFAULT_FROM_EMAIL: email@glitchtip.com + CELERY_WORKER_AUTOSCALE: "1,3" + CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000" + +x-depends_on: &default-depends_on + - postgres + - redis + +services: + postgres: + image: postgres:16 + environment: + POSTGRES_HOST_AUTH_METHOD: "trust" + restart: unless-stopped + volumes: + - pg-data:/var/lib/postgresql/data + + redis: + image: redis + restart: unless-stopped + + web: + image: glitchtip/glitchtip:v4.0 + depends_on: *default-depends_on + ports: + - ${GLITCHTIP_PORT} + environment: *default-environment + restart: unless-stopped + volumes: + - uploads:/code/uploads + worker: + image: glitchtip/glitchtip:v4.0 + command: ./bin/run-celery-with-beat.sh + depends_on: *default-depends_on +
environment: *default-environment + restart: unless-stopped + volumes: + - uploads:/code/uploads + + migrate: + image: glitchtip/glitchtip:v4.0 + depends_on: *default-depends_on + command: "./manage.py migrate" + environment: *default-environment + + +volumes: + pg-data: + uploads: diff --git a/blueprints/glitchtip/index.ts b/blueprints/glitchtip/index.ts new file mode 100644 index 000000000..093d752e1 --- /dev/null +++ b/blueprints/glitchtip/index.ts @@ -0,0 +1,30 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const secretKey = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8000, + serviceName: "web", + }, + ]; + const envs = [ + `GLITCHTIP_HOST=${mainDomain}`, + "GLITCHTIP_PORT=8000", + `SECRET_KEY=${secretKey}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/glpi/docker-compose.yml b/blueprints/glpi/docker-compose.yml new file mode 100644 index 000000000..fa732fa36 --- /dev/null +++ b/blueprints/glpi/docker-compose.yml @@ -0,0 +1,26 @@ +services: + glpi-mysql: + image: mysql:9.1.0 + restart: always + volumes: + - glpi-mysql-data:/var/lib/mysql + + + glpi-web: + image: elestio/glpi:10.0.16 + restart: always + volumes: + - /etc/timezone:/etc/timezone:ro + - /etc/localtime:/etc/localtime:ro + - glpi-www-data:/var/www/html/glpi + environment: + - TIMEZONE=Europe/Brussels + + +volumes: + glpi-mysql-data: + glpi-www-data: + +networks: + dokploy-network: + external: true \ No newline at end of file diff --git a/blueprints/glpi/index.ts b/blueprints/glpi/index.ts new file mode 100644 index 000000000..48695fb50 --- /dev/null +++ b/blueprints/glpi/index.ts @@ -0,0 +1,20 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 80, + serviceName: "glpi-web", + }, + ]; + return { + domains, + }; +} diff --git a/blueprints/gotenberg/docker-compose.yml b/blueprints/gotenberg/docker-compose.yml new file mode 100644 index 000000000..02bbacb7c --- /dev/null +++ b/blueprints/gotenberg/docker-compose.yml @@ -0,0 +1,18 @@ +services: + gotenberg: + image: gotenberg/gotenberg:latest + environment: + # NOTE: requires the --api-enable-basic-auth option in "command" + # make sure to also change the credentials in Dokploy environment + GOTENBERG_API_BASIC_AUTH_USERNAME: ${GOTENBERG_API_BASIC_AUTH_USERNAME} + GOTENBERG_API_BASIC_AUTH_PASSWORD: ${GOTENBERG_API_BASIC_AUTH_PASSWORD} + command: [ + "gotenberg", + # See the full list of options at https://gotenberg.dev/docs/configuration + + # Examples: + "--api-enable-basic-auth" + #"--api-timeout=60s", + #"--chromium-auto-start" + ] + restart: unless-stopped \ No newline at end of file diff --git a/blueprints/gotenberg/index.ts b/blueprints/gotenberg/index.ts new file mode 100644 index 000000000..ef48703ee --- /dev/null +++ b/blueprints/gotenberg/index.ts @@ -0,0 +1,29 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const username = "gotenberg"; + const password = "changethis"; + + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 3000, + 
serviceName: "gotenberg", + }, + ]; + + const envs = [ + `GOTENBERG_API_BASIC_AUTH_USERNAME=${username}`, + `GOTENBERG_API_BASIC_AUTH_PASSWORD=${password}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/grafana/docker-compose.yml b/blueprints/grafana/docker-compose.yml new file mode 100644 index 000000000..9d913c17f --- /dev/null +++ b/blueprints/grafana/docker-compose.yml @@ -0,0 +1,9 @@ +version: "3.8" +services: + grafana: + image: grafana/grafana-enterprise:9.5.20 + restart: unless-stopped + volumes: + - grafana-storage:/var/lib/grafana +volumes: + grafana-storage: {} diff --git a/blueprints/grafana/index.ts b/blueprints/grafana/index.ts new file mode 100644 index 000000000..fb614ef32 --- /dev/null +++ b/blueprints/grafana/index.ts @@ -0,0 +1,19 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 3000, + serviceName: "grafana", + }, + ]; + return { + domains, + }; +} diff --git a/blueprints/heyform/docker-compose.yml b/blueprints/heyform/docker-compose.yml new file mode 100644 index 000000000..ec7e02fc1 --- /dev/null +++ b/blueprints/heyform/docker-compose.yml @@ -0,0 +1,48 @@ +services: + heyform: + image: heyform/community-edition:latest + restart: always + volumes: + # Persist uploaded images + - heyform-data:/app/static/upload + depends_on: + - mongo + - redis + ports: + - 8000 + env_file: + - .env + environment: + MONGO_URI: 'mongodb://mongo:27017/heyform' + REDIS_HOST: redis + REDIS_PORT: 6379 + networks: + - heyform-network + + mongo: + image: percona/percona-server-mongodb:4.4 + restart: always + networks: + - heyform-network + volumes: + # Persist MongoDB data + - mongo-data:/data/db + + redis: + image: redis + restart: always + command: "redis-server --appendonly yes" + networks: + - heyform-network + volumes: + # Persist KeyDB data + - redis-data:/data + +networks: + heyform-network: + driver: bridge + +volumes: + heyform-data: + mongo-data: + redis-data: diff --git a/blueprints/heyform/index.ts b/blueprints/heyform/index.ts new file mode 100644 index 000000000..03ea4b923 --- /dev/null +++ b/blueprints/heyform/index.ts @@ -0,0 +1,32 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const sessionKey = generateBase64(64); + const formEncryptionKey = generateBase64(64); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8000, + serviceName: "heyform", + }, + ]; + + const envs = [ + `APP_HOMEPAGE_URL=http://${mainDomain}`, + `SESSION_KEY=${sessionKey}`, + `FORM_ENCRYPTION_KEY=${formEncryptionKey}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/hi-events/docker-compose.yml b/blueprints/hi-events/docker-compose.yml new file mode 100644 index 000000000..cce45fecf --- /dev/null +++ b/blueprints/hi-events/docker-compose.yml @@ -0,0 +1,44 @@ +services: + all-in-one: + image: daveearley/hi.events-all-in-one:v0.8.0-beta.1 + restart: always + environment: + - VITE_FRONTEND_URL=https://${DOMAIN} + - APP_FRONTEND_URL=https://${DOMAIN} + - VITE_API_URL_CLIENT=https://${DOMAIN}/api + - VITE_API_URL_SERVER=http://localhost:80/api + - VITE_STRIPE_PUBLISHABLE_KEY + - LOG_CHANNEL=stderr + - QUEUE_CONNECTION=sync + - MAIL_MAILER=array + - APP_KEY + - 
JWT_SECRET + - FILESYSTEM_PUBLIC_DISK=public + - FILESYSTEM_PRIVATE_DISK=local + - APP_CDN_URL=https://${DOMAIN}/storage + - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} + - MAIL_MAILER + - MAIL_HOST + - MAIL_PORT + - MAIL_FROM_ADDRESS + - MAIL_FROM_NAME + depends_on: + - postgres + + postgres: + image: elestio/postgres:16 + restart: always + + environment: + - POSTGRES_DB + - POSTGRES_USER + - POSTGRES_PASSWORD + volumes: + - pg_hi-events_data:/var/lib/postgresql/data + +networks: + dokploy-network: + external: true + +volumes: + pg_hi-events_data: \ No newline at end of file diff --git a/blueprints/hi-events/index.ts b/blueprints/hi-events/index.ts new file mode 100644 index 000000000..f799bb737 --- /dev/null +++ b/blueprints/hi-events/index.ts @@ -0,0 +1,41 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 80, + serviceName: "all-in-one", + }, + ]; + + const envs = [ + "# change domain here", + "DOMAIN=my-events.com", + "", + "POSTGRES_DB=hievents", + "POSTGRES_USER=hievents", + "POSTGRES_PASSWORD=VERY_STRONG_PASSWORD", + "", + "VITE_STRIPE_PUBLISHABLE_KEY=", + "", + "APP_KEY=my-app-key", + "JWT_SECRET=STRONG_JWT_SECRET", + "", + "MAIL_MAILER=", + "MAIL_HOST=", + "MAIL_PORT=", + "MAIL_FROM_ADDRESS=", + "MAIL_FROM_NAME=", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/hoarder/docker-compose.yml b/blueprints/hoarder/docker-compose.yml new file mode 100644 index 000000000..93e594697 --- /dev/null +++ b/blueprints/hoarder/docker-compose.yml @@ -0,0 +1,45 @@ +services: + web: + image: ghcr.io/hoarder-app/hoarder:0.22.0 + restart: unless-stopped + volumes: + - hoarder-data:/data + ports: + - 3000 + environment: + - DISABLE_SIGNUPS + - NEXTAUTH_URL + - NEXTAUTH_SECRET + - MEILI_ADDR=http://meilisearch:7700 + - BROWSER_WEB_URL=http://chrome:9222 + - DATA_DIR=/data + chrome: + image: gcr.io/zenika-hub/alpine-chrome:124 + restart: unless-stopped + command: + - --no-sandbox + - --disable-gpu + - --disable-dev-shm-usage + - --remote-debugging-address=0.0.0.0 + - --remote-debugging-port=9222 + - --hide-scrollbars + meilisearch: + image: getmeili/meilisearch:v1.6 + restart: unless-stopped + environment: + - MEILI_MASTER_KEY + - MEILI_NO_ANALYTICS="true" + volumes: + - meilisearch-data:/meili_data + healthcheck: + test: + - CMD + - curl + - '-f' + - 'http://127.0.0.1:7700/health' + interval: 2s + timeout: 10s + retries: 15 +volumes: + meilisearch-data: + hoarder-data: \ No newline at end of file diff --git a/blueprints/hoarder/index.ts b/blueprints/hoarder/index.ts new file mode 100644 index 000000000..d1c656e19 --- /dev/null +++ b/blueprints/hoarder/index.ts @@ -0,0 +1,34 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const postgresPassword = generatePassword(); + const nextSecret = generateBase64(32); + const meiliMasterKey = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "web", + }, + ]; + + const envs = [ + `NEXTAUTH_SECRET=${nextSecret}`, + `MEILI_MASTER_KEY=${meiliMasterKey}`, + `NEXTAUTH_URL=http://${mainDomain}`, + ]; + + return { + domains, + envs, + }; +} 
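All of the blueprints in this patch follow the same generate(schema) contract seen in hoarder above: build a DomainSchema list pointing at a service/port from the blueprint's docker-compose.yml, generate any secrets, and return { domains, envs, mounts }. A minimal smoke-test sketch of that contract, using hoarder as the example — the Schema field names used here (projectName, serverIp) are an assumption, since ../utils is not part of this patch:

    import { generate } from "./blueprints/hoarder";
    import type { Schema } from "./blueprints/utils";

    // Hypothetical input values; the real Schema shape lives in blueprints/utils.
    const schema = { projectName: "demo", serverIp: "127.0.0.1" } as Schema;
    const template = generate(schema);

    // Each template must route at least one domain to a service/port pair
    // that exists in its docker-compose.yml ("web" on 3000 for hoarder).
    if (!template.domains?.length) throw new Error("no domains generated");
    for (const { serviceName, port } of template.domains) {
      if (!serviceName || !port) throw new Error("incomplete domain entry");
    }

    // Generated secrets (NEXTAUTH_SECRET, MEILI_MASTER_KEY above) must not be empty.
    for (const env of template.envs ?? []) {
      const eq = env.indexOf("=");
      const key = env.slice(0, eq);
      const value = env.slice(eq + 1);
      if ((key.endsWith("SECRET") || key.endsWith("KEY")) && !value) {
        throw new Error(`empty value for ${key}`);
      }
    }

The same check applies unchanged to every other blueprint, since only the import path and the expected service/port differ.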
diff --git a/blueprints/homarr/docker-compose.yml b/blueprints/homarr/docker-compose.yml new file mode 100644 index 000000000..876ea3f6f --- /dev/null +++ b/blueprints/homarr/docker-compose.yml @@ -0,0 +1,11 @@ +services: + homarr: + image: ghcr.io/homarr-labs/homarr:latest + restart: unless-stopped + volumes: + # - /var/run/docker.sock:/var/run/docker.sock # Optional, only if you want docker integration + - ../homarr/appdata:/appdata + environment: + - SECRET_ENCRYPTION_KEY=${SECRET_ENCRYPTION_KEY} + ports: + - 7575 diff --git a/blueprints/homarr/index.ts b/blueprints/homarr/index.ts new file mode 100644 index 000000000..eb5a9f823 --- /dev/null +++ b/blueprints/homarr/index.ts @@ -0,0 +1,27 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const secretKey = generatePassword(64); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 7575, + serviceName: "homarr", + }, + ]; + + const envs = [`SECRET_ENCRYPTION_KEY=${secretKey}`]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/huly/docker-compose.yml b/blueprints/huly/docker-compose.yml new file mode 100644 index 000000000..639b10d2f --- /dev/null +++ b/blueprints/huly/docker-compose.yml @@ -0,0 +1,172 @@ +name: ${DOCKER_NAME} +version: "3" +services: + nginx: + + image: "nginx:1.21.3" + ports: + - 80 + volumes: + - ../files/volumes/nginx/.huly.nginx:/etc/nginx/conf.d/default.conf + restart: unless-stopped + + mongodb: + + image: "mongo:7-jammy" + environment: + - PUID=1000 + - PGID=1000 + volumes: + - db:/data/db + restart: unless-stopped + + minio: + + image: "minio/minio:RELEASE.2024-11-07T00-52-20Z" + command: server /data --address ":9000" --console-address ":9001" + volumes: + - files:/data + restart: unless-stopped + + elastic: + + image: "elasticsearch:7.14.2" + command: | + /bin/sh -c "./bin/elasticsearch-plugin list | grep -q ingest-attachment || yes | ./bin/elasticsearch-plugin install --silent ingest-attachment; + /usr/local/bin/docker-entrypoint.sh eswrapper" + volumes: + - elastic:/usr/share/elasticsearch/data + environment: + - ELASTICSEARCH_PORT_NUMBER=9200 + - BITNAMI_DEBUG=true + - discovery.type=single-node + - ES_JAVA_OPTS=-Xms1024m -Xmx1024m + - http.cors.enabled=true + - http.cors.allow-origin=http://localhost:8082 + healthcheck: + interval: 20s + retries: 10 + test: curl -s http://localhost:9200/_cluster/health | grep -vq '"status":"red"' + restart: unless-stopped + + rekoni: + + image: hardcoreeng/rekoni-service:${HULY_VERSION} + environment: + - SECRET=${SECRET} + deploy: + resources: + limits: + memory: 500M + restart: unless-stopped + + transactor: + + image: hardcoreeng/transactor:${HULY_VERSION} + environment: + - SERVER_PORT=3333 + - SERVER_SECRET=${SECRET} + - SERVER_CURSOR_MAXTIMEMS=30000 + - DB_URL=mongodb://mongodb:27017 + - MONGO_URL=mongodb://mongodb:27017 + - STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + - FRONT_URL=http://localhost:8087 + - ACCOUNTS_URL=http://account:3000 + - FULLTEXT_URL=http://fulltext:4700 + - STATS_URL=http://stats:4900 + - LAST_NAME_FIRST=${LAST_NAME_FIRST:-true} + restart: unless-stopped + + collaborator: + + image: hardcoreeng/collaborator:${HULY_VERSION} + environment: + - COLLABORATOR_PORT=3078 + - SECRET=${SECRET} + - ACCOUNTS_URL=http://account:3000 + - DB_URL=mongodb://mongodb:27017 + - STATS_URL=http://stats:4900 + - 
STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + restart: unless-stopped + + account: + + image: hardcoreeng/account:${HULY_VERSION} + environment: + - SERVER_PORT=3000 + - SERVER_SECRET=${SECRET} + - DB_URL=mongodb://mongodb:27017 + - MONGO_URL=mongodb://mongodb:27017 + - TRANSACTOR_URL=ws://transactor:3333;ws${SECURE:+s}://${HOST_ADDRESS}/_transactor + - STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + - FRONT_URL=http://front:8080 + - STATS_URL=http://stats:4900 + - MODEL_ENABLED=* + - ACCOUNTS_URL=http://localhost:3000 + - ACCOUNT_PORT=3000 + restart: unless-stopped + + workspace: + + image: hardcoreeng/workspace:${HULY_VERSION} + environment: + - SERVER_SECRET=${SECRET} + - DB_URL=mongodb://mongodb:27017 + - MONGO_URL=mongodb://mongodb:27017 + - TRANSACTOR_URL=ws://transactor:3333;ws${SECURE:+s}://${HOST_ADDRESS}/_transactor + - STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + - MODEL_ENABLED=* + - ACCOUNTS_URL=http://account:3000 + - STATS_URL=http://stats:4900 + restart: unless-stopped + + front: + + image: hardcoreeng/front:${HULY_VERSION} + environment: + - SERVER_PORT=8080 + - SERVER_SECRET=${SECRET} + - LOVE_ENDPOINT=http${SECURE:+s}://${HOST_ADDRESS}/_love + - ACCOUNTS_URL=http${SECURE:+s}://${HOST_ADDRESS}/_accounts + - REKONI_URL=http${SECURE:+s}://${HOST_ADDRESS}/_rekoni + - CALENDAR_URL=http${SECURE:+s}://${HOST_ADDRESS}/_calendar + - GMAIL_URL=http${SECURE:+s}://${HOST_ADDRESS}/_gmail + - TELEGRAM_URL=http${SECURE:+s}://${HOST_ADDRESS}/_telegram + - STATS_URL=http${SECURE:+s}://${HOST_ADDRESS}/_stats + - UPLOAD_URL=/files + - ELASTIC_URL=http://elastic:9200 + - COLLABORATOR_URL=ws${SECURE:+s}://${HOST_ADDRESS}/_collaborator + - STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + - DB_URL=mongodb://mongodb:27017 + - MONGO_URL=mongodb://mongodb:27017 + - TITLE=${TITLE:-Huly Self Host} + - DEFAULT_LANGUAGE=${DEFAULT_LANGUAGE:-en} + - LAST_NAME_FIRST=${LAST_NAME_FIRST:-true} + - DESKTOP_UPDATES_CHANNEL=selfhost + restart: unless-stopped + + fulltext: + + image: hardcoreeng/fulltext:${HULY_VERSION} + environment: + - SERVER_SECRET=${SECRET} + - DB_URL=mongodb://mongodb:27017 + - FULLTEXT_DB_URL=http://elastic:9200 + - ELASTIC_INDEX_NAME=huly_storage_index + - STORAGE_CONFIG=minio|minio?accessKey=minioadmin&secretKey=minioadmin + - REKONI_URL=http://rekoni:4004 + - ACCOUNTS_URL=http://account:3000 + - STATS_URL=http://stats:4900 + restart: unless-stopped + + stats: + + image: hardcoreeng/stats:${HULY_VERSION} + environment: + - PORT=4900 + - SERVER_SECRET=${SECRET} + restart: unless-stopped +volumes: + db: + elastic: + files: diff --git a/blueprints/huly/index.ts b/blueprints/huly/index.ts new file mode 100644 index 000000000..3157ed51e --- /dev/null +++ b/blueprints/huly/index.ts @@ -0,0 +1,152 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const hulySecret = generateBase64(64); + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "nginx", + }, + ]; + + const envs = [ + "HULY_VERSION=v0.6.377", + "DOCKER_NAME=huly", + "", + "# The address of the host or server from which you will access your Huly instance.", + "# This can be a domain name (e.g., huly.example.com) or an IP address (e.g., 192.168.1.1).", + `HOST_ADDRESS=${mainDomain}`, + "", + "# Set this variable to 'true' to 
enable SSL (HTTPS/WSS). ", + "# Leave it empty to use non-SSL (HTTP/WS).", + "SECURE=", + "", + "# Specify the IP address to bind to; leave blank to bind to all interfaces (0.0.0.0).", + "# Do not use IP:PORT format in HTTP_BIND or HTTP_PORT.", + "HTTP_PORT=80", + "HTTP_BIND=", + "", + "# Huly specific variables", + "TITLE=Huly", + "DEFAULT_LANGUAGE=en", + "LAST_NAME_FIRST=true", + "", + "# The following configs are auto-generated by the setup script. ", + "# Please do not manually overwrite.", + "", + "# Run with --secret to regenerate.", + `SECRET=${hulySecret}`, + ]; + + const mounts: Template["mounts"] = [ + { + filePath: "/volumes/nginx/.huly.nginx", + content: `server { + listen 80; + server_name _; + location / { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://front:8080; + } + + location /_accounts { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + rewrite ^/_accounts(/.*)$ $1 break; + proxy_pass http://account:3000/; + } + + #location /_love { + # proxy_set_header Host $host; + # proxy_set_header X-Real-IP $remote_addr; + # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + # proxy_set_header X-Forwarded-Proto $scheme; + + # proxy_http_version 1.1; + # proxy_set_header Upgrade $http_upgrade; + # proxy_set_header Connection "upgrade"; + # rewrite ^/_love(/.*)$ $1 break; + # proxy_pass http://love:8096/; + #} + + location /_collaborator { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + rewrite ^/_collaborator(/.*)$ $1 break; + proxy_pass http://collaborator:3078/; + } + + location /_transactor { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + rewrite ^/_transactor(/.*)$ $1 break; + proxy_pass http://transactor:3333/; + } + + location ~ ^/eyJ { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_pass http://transactor:3333; + } + + location /_rekoni { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + rewrite ^/_rekoni(/.*)$ $1 break; + proxy_pass http://rekoni:4004/; + } + + location /_stats { + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + rewrite ^/_stats(/.*)$ $1 break; + proxy_pass http://stats:4900/; + } +}`, + }, + ]; + + return { + domains, + envs, + mounts, + }; +} diff --git a/blueprints/immich/docker-compose.yml b/blueprints/immich/docker-compose.yml new file mode 100644 
index 000000000..743f70acf --- /dev/null +++ b/blueprints/immich/docker-compose.yml @@ -0,0 +1,107 @@ +version: "3.9" + +services: + immich-server: + image: ghcr.io/immich-app/immich-server:v1.121.0 + + volumes: + - immich-library:/usr/src/app/upload + - /etc/localtime:/etc/localtime:ro + depends_on: + immich-redis: + condition: service_healthy + immich-database: + condition: service_healthy + environment: + PORT: 2283 + SERVER_URL: ${SERVER_URL} + FRONT_BASE_URL: ${FRONT_BASE_URL} + # Database Configuration + DB_HOSTNAME: ${DB_HOSTNAME} + DB_PORT: ${DB_PORT} + DB_USERNAME: ${DB_USERNAME} + DB_PASSWORD: ${DB_PASSWORD} + DB_DATABASE_NAME: ${DB_DATABASE_NAME} + # Redis Configuration + REDIS_HOSTNAME: ${REDIS_HOSTNAME} + REDIS_PORT: ${REDIS_PORT} + REDIS_DBINDEX: ${REDIS_DBINDEX} + # Server Configuration + TZ: ${TZ} + restart: always + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:2283/server-info/ping"] + interval: 30s + timeout: 10s + retries: 3 + + immich-machine-learning: + image: ghcr.io/immich-app/immich-machine-learning:v1.121.0 + + volumes: + - immich-model-cache:/cache + environment: + REDIS_HOSTNAME: ${REDIS_HOSTNAME} + REDIS_PORT: ${REDIS_PORT} + REDIS_DBINDEX: ${REDIS_DBINDEX} + restart: always + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:3003/ping"] + interval: 30s + timeout: 10s + retries: 3 + + immich-redis: + image: redis:6.2-alpine + + volumes: + - immich-redis-data:/data + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + restart: always + + immich-database: + image: tensorchord/pgvecto-rs:pg14-v0.2.0 + + volumes: + - immich-postgres:/var/lib/postgresql/data + environment: + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_USER: ${DB_USERNAME} + POSTGRES_DB: immich + POSTGRES_INITDB_ARGS: '--data-checksums' + healthcheck: + test: pg_isready -U ${DB_USERNAME} -d immich || exit 1 + interval: 10s + timeout: 5s + retries: 5 + command: + [ + 'postgres', + '-c', + 'shared_preload_libraries=vectors.so', + '-c', + 'search_path="$$user", public, vectors', + '-c', + 'logging_collector=on', + '-c', + 'max_wal_size=2GB', + '-c', + 'shared_buffers=512MB', + '-c', + 'wal_compression=on', + ] + restart: always + +networks: + dokploy-network: + external: true + +volumes: + immich-model-cache: + immich-postgres: + immich-library: + immich-redis-data: \ No newline at end of file diff --git a/blueprints/immich/index.ts b/blueprints/immich/index.ts new file mode 100644 index 000000000..4beca87da --- /dev/null +++ b/blueprints/immich/index.ts @@ -0,0 +1,46 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const dbPassword = generatePassword(); + const dbUser = "immich"; + const _appSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 2283, + serviceName: "immich-server", + }, + ]; + + const envs = [ + `IMMICH_HOST=${mainDomain}`, + `SERVER_URL=https://${mainDomain}`, + `FRONT_BASE_URL=https://${mainDomain}`, + "# Database Configuration", + "DB_HOSTNAME=immich-database", + "DB_PORT=5432", + `DB_USERNAME=${dbUser}`, + `DB_PASSWORD=${dbPassword}`, + "DB_DATABASE_NAME=immich", + "# Redis Configuration", + "REDIS_HOSTNAME=immich-redis", + "REDIS_PORT=6379", + "REDIS_DBINDEX=0", + "# Server Configuration", + "TZ=UTC", + ]; + + return { + domains, + envs, + }; +} diff --git 
a/blueprints/infisical/docker-compose.yml b/blueprints/infisical/docker-compose.yml new file mode 100644 index 000000000..7566c8980 --- /dev/null +++ b/blueprints/infisical/docker-compose.yml @@ -0,0 +1,83 @@ +services: + db-migration: + depends_on: + db: + condition: service_healthy + image: infisical/infisical:v0.90.1-postgres + environment: + - NODE_ENV=production + - ENCRYPTION_KEY + - AUTH_SECRET + - SITE_URL + - DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + - REDIS_URL=redis://redis:6379 + - SMTP_HOST + - SMTP_PORT + - SMTP_FROM_NAME + - SMTP_USERNAME + - SMTP_PASSWORD + - SMTP_SECURE=true + command: npm run migration:latest + pull_policy: always + + + backend: + restart: unless-stopped + depends_on: + db: + condition: service_healthy + redis: + condition: service_started + db-migration: + condition: service_completed_successfully + image: infisical/infisical:v0.90.1-postgres + pull_policy: always + environment: + - NODE_ENV=production + - ENCRYPTION_KEY + - AUTH_SECRET + - SITE_URL + - DB_CONNECTION_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB} + - REDIS_URL=redis://redis:6379 + - SMTP_HOST + - SMTP_PORT + - SMTP_FROM_NAME + - SMTP_USERNAME + - SMTP_PASSWORD + - SMTP_SECURE=true + + + redis: + image: redis:7.4.1 + env_file: .env + restart: always + environment: + - ALLOW_EMPTY_PASSWORD=yes + + volumes: + - redis_infisical_data:/data + + db: + image: postgres:14-alpine + restart: always + environment: + - POSTGRES_PASSWORD + - POSTGRES_USER + - POSTGRES_DB + volumes: + - pg_infisical_data:/var/lib/postgresql/data + + healthcheck: + test: "pg_isready --username=${POSTGRES_USER} && psql --username=${POSTGRES_USER} --list" + interval: 5s + timeout: 10s + retries: 10 + +volumes: + pg_infisical_data: + redis_infisical_data: + +networks: + dokploy-network: + external: true + diff --git a/blueprints/infisical/index.ts b/blueprints/infisical/index.ts new file mode 100644 index 000000000..6d2127740 --- /dev/null +++ b/blueprints/infisical/index.ts @@ -0,0 +1,93 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 8080, + serviceName: "backend", + }, + ]; + + const envs = [ + "# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION", + "ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218", + "", + "# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION", + "AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE=", + "# Postgres creds", + "POSTGRES_PASSWORD=infisical", + "POSTGRES_USER=infisical", + "POSTGRES_DB=infisical", + "", + "# Website URL", + "# Required", + "SITE_URL=http://localhost:8080", + "", + "# Mail/SMTP", + "SMTP_HOST=", + "SMTP_PORT=", + "SMTP_FROM_NAME=", + "SMTP_USERNAME=", + "SMTP_PASSWORD=", + "", + "# Integration", + "# Optional only if integration is used", + "CLIENT_ID_HEROKU=", + "CLIENT_ID_VERCEL=", + "CLIENT_ID_NETLIFY=", + "CLIENT_ID_GITHUB=", + "CLIENT_ID_GITHUB_APP=", + "CLIENT_SLUG_GITHUB_APP=", + "CLIENT_ID_GITLAB=", + "CLIENT_ID_BITBUCKET=", + "CLIENT_SECRET_HEROKU=", + "CLIENT_SECRET_VERCEL=", + "CLIENT_SECRET_NETLIFY=", + "CLIENT_SECRET_GITHUB=", + "CLIENT_SECRET_GITHUB_APP=", + "CLIENT_SECRET_GITLAB=", + "CLIENT_SECRET_BITBUCKET=", + "CLIENT_SLUG_VERCEL=", + "", + "CLIENT_PRIVATE_KEY_GITHUB_APP=", + "CLIENT_APP_ID_GITHUB_APP=", + "", + "# Sentry (optional)
for monitoring errors", + "SENTRY_DSN=", + "", + "# Infisical Cloud-specific configs", + "# Ignore - Not applicable for self-hosted version", + "POSTHOG_HOST=", + "POSTHOG_PROJECT_API_KEY=", + "", + "# SSO-specific variables", + "CLIENT_ID_GOOGLE_LOGIN=", + "CLIENT_SECRET_GOOGLE_LOGIN=", + "", + "CLIENT_ID_GITHUB_LOGIN=", + "CLIENT_SECRET_GITHUB_LOGIN=", + "", + "CLIENT_ID_GITLAB_LOGIN=", + "CLIENT_SECRET_GITLAB_LOGIN=", + "", + "CAPTCHA_SECRET=", + "", + "NEXT_PUBLIC_CAPTCHA_SITE_KEY=", + "", + "PLAIN_API_KEY=", + "PLAIN_WISH_LABEL_IDS=", + "", + "SSL_CLIENT_CERTIFICATE_HEADER_KEY=", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/influxdb/docker-compose.yml b/blueprints/influxdb/docker-compose.yml new file mode 100644 index 000000000..1327c6028 --- /dev/null +++ b/blueprints/influxdb/docker-compose.yml @@ -0,0 +1,11 @@ +services: + influxdb: + image: influxdb:2.7.10 + restart: unless-stopped + volumes: + - influxdb2-data:/var/lib/influxdb2 + - influxdb2-config:/etc/influxdb2 + +volumes: + influxdb2-data: + influxdb2-config: \ No newline at end of file diff --git a/blueprints/influxdb/index.ts b/blueprints/influxdb/index.ts new file mode 100644 index 000000000..550b680e7 --- /dev/null +++ b/blueprints/influxdb/index.ts @@ -0,0 +1,19 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 8086, + serviceName: "influxdb", + }, + ]; + return { + domains, + }; +} diff --git a/blueprints/invoiceshelf/docker-compose.yml b/blueprints/invoiceshelf/docker-compose.yml new file mode 100644 index 000000000..ef47f1c04 --- /dev/null +++ b/blueprints/invoiceshelf/docker-compose.yml @@ -0,0 +1,57 @@ +version: "3.8" + +services: + invoiceshelf-postgres: + image: postgres:15 + + volumes: + - invoiceshelf-postgres-data:/var/lib/postgresql/data + environment: + - POSTGRES_PASSWORD=${DB_PASSWORD} + - POSTGRES_USER=${DB_USERNAME} + - POSTGRES_DB=${DB_DATABASE} + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME}"] + interval: 10s + timeout: 5s + retries: 5 + + invoiceshelf-app: + image: invoiceshelf/invoiceshelf:latest + + volumes: + - invoiceshelf-app-data:/data + - invoiceshelf-app-conf:/conf + environment: + - PHP_TZ=UTC + - TIMEZONE=UTC + - APP_NAME=InvoiceShelf + - APP_ENV=production + - APP_DEBUG=false + - APP_URL=http://${INVOICESHELF_HOST} + - DB_CONNECTION=pgsql + - DB_HOST=invoiceshelf-postgres + - DB_PORT=5432 + - DB_DATABASE=${DB_DATABASE} + - DB_USERNAME=${DB_USERNAME} + - DB_PASSWORD=${DB_PASSWORD} + - CACHE_STORE=file + - SESSION_DRIVER=file + - SESSION_LIFETIME=120 + - SESSION_ENCRYPT=true + - SESSION_PATH=/ + - SESSION_DOMAIN=${INVOICESHELF_HOST} + - SANCTUM_STATEFUL_DOMAINS=${INVOICESHELF_HOST} + - STARTUP_DELAY=10 + depends_on: + invoiceshelf-postgres: + condition: service_healthy + +networks: + dokploy-network: + external: true + +volumes: + invoiceshelf-postgres-data: + invoiceshelf-app-data: + invoiceshelf-app-conf: \ No newline at end of file diff --git a/blueprints/invoiceshelf/index.ts b/blueprints/invoiceshelf/index.ts new file mode 100644 index 000000000..a83ebf07a --- /dev/null +++ b/blueprints/invoiceshelf/index.ts @@ -0,0 +1,34 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = 
generateRandomDomain(schema); + const dbPassword = generatePassword(); + const dbUsername = "invoiceshelf"; + const dbDatabase = "invoiceshelf"; + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "invoiceshelf-app", + }, + ]; + + const envs = [ + `INVOICESHELF_HOST=${mainDomain}`, + `DB_PASSWORD=${dbPassword}`, + `DB_USERNAME=${dbUsername}`, + `DB_DATABASE=${dbDatabase}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/it-tools/docker-compose.yml b/blueprints/it-tools/docker-compose.yml new file mode 100644 index 000000000..b26665f8a --- /dev/null +++ b/blueprints/it-tools/docker-compose.yml @@ -0,0 +1,8 @@ +services: + it-tools: + image: corentinth/it-tools:latest + healthcheck: + test: ["CMD", "curl", "-f", "http://127.0.0.1:80"] + interval: 30s + timeout: 10s + retries: 3 diff --git a/blueprints/it-tools/index.ts b/blueprints/it-tools/index.ts new file mode 100644 index 000000000..9912c4ba1 --- /dev/null +++ b/blueprints/it-tools/index.ts @@ -0,0 +1,20 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 80, + serviceName: "it-tools", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/jellyfin/docker-compose.yml b/blueprints/jellyfin/docker-compose.yml new file mode 100644 index 000000000..cb61476ab --- /dev/null +++ b/blueprints/jellyfin/docker-compose.yml @@ -0,0 +1,19 @@ +version: "3.8" +services: + jellyfin: + image: jellyfin/jellyfin:10 + volumes: + - config:/config + - cache:/cache + - media:/media + restart: "unless-stopped" + # Optional - alternative address used for autodiscovery + environment: + - JELLYFIN_PublishedServerUrl=http://${JELLYFIN_HOST} + # Optional - may be necessary for docker healthcheck to pass if running in host network mode + extra_hosts: + - "host.docker.internal:host-gateway" +volumes: + config: + cache: + media: diff --git a/blueprints/jellyfin/index.ts b/blueprints/jellyfin/index.ts new file mode 100644 index 000000000..61c9c9b7c --- /dev/null +++ b/blueprints/jellyfin/index.ts @@ -0,0 +1,25 @@ +// EXAMPLE +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domain = generateRandomDomain(schema); + const domains: DomainSchema[] = [ + { + host: domain, + port: 8096, + serviceName: "jellyfin", + }, + ]; + + const envs = [`JELLYFIN_HOST=${domain}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/kimai/docker-compose.yml b/blueprints/kimai/docker-compose.yml new file mode 100644 index 000000000..253ecb004 --- /dev/null +++ b/blueprints/kimai/docker-compose.yml @@ -0,0 +1,49 @@ +services: + app: + image: kimai/kimai2:apache-2.26.0 + restart: unless-stopped + environment: + APP_ENV: prod + DATABASE_URL: mysql://kimai:${KI_MYSQL_PASSWORD:-kimai}@db/kimai + TRUSTED_PROXIES: localhost + APP_SECRET: ${KI_APP_SECRET} + MAILER_FROM: ${KI_MAILER_FROM:-admin@kimai.local} + MAILER_URL: ${KI_MAILER_URL:-null://null} + ADMINMAIL: ${KI_ADMINMAIL:-admin@kimai.local} + ADMINPASS: ${KI_ADMINPASS} + volumes: + - kimai-data:/opt/kimai/var + depends_on: + db: + condition: service_healthy + + db: + image: mariadb:10.11 + restart: unless-stopped + environment: + - MYSQL_DATABASE=kimai + - MYSQL_USER=kimai + - MYSQL_PASSWORD=${KI_MYSQL_PASSWORD} + - 
MYSQL_ROOT_PASSWORD=${KI_MYSQL_ROOT_PASSWORD} + volumes: + - mysql-data:/var/lib/mysql + command: + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_unicode_ci + - --innodb-buffer-pool-size=256M + - --innodb-flush-log-at-trx-commit=2 + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "kimai", "-p${KI_MYSQL_PASSWORD}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + + +networks: + dokploy-network: + external: true + +volumes: + kimai-data: + mysql-data: \ No newline at end of file diff --git a/blueprints/kimai/index.ts b/blueprints/kimai/index.ts new file mode 100644 index 000000000..5569905ec --- /dev/null +++ b/blueprints/kimai/index.ts @@ -0,0 +1,37 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domain = generateRandomDomain(schema); + const domains: DomainSchema[] = [ + { + host: domain, + port: 8001, + serviceName: "app", + }, + ]; + + const adminPassword = generatePassword(32); + const mysqlPassword = generatePassword(32); + const mysqlRootPassword = generatePassword(32); + const appSecret = generatePassword(32); + + const envs = [ + `KI_HOST=${domain}`, + "KI_ADMINMAIL=admin@kimai.local", + `KI_ADMINPASS=${adminPassword}`, + `KI_MYSQL_ROOT_PASSWORD=${mysqlRootPassword}`, + `KI_MYSQL_PASSWORD=${mysqlPassword}`, + `KI_APP_SECRET=${appSecret}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/langflow/docker-compose.yml b/blueprints/langflow/docker-compose.yml new file mode 100644 index 000000000..a96282868 --- /dev/null +++ b/blueprints/langflow/docker-compose.yml @@ -0,0 +1,32 @@ +version: "3.8" + +services: + langflow: + image: langflowai/langflow:v1.1.1 + ports: + - 7860 + depends_on: + - postgres-langflow + environment: + - LANGFLOW_DATABASE_URL=postgresql://${DB_USERNAME}:${DB_PASSWORD}@postgres-langflow:5432/langflow + # This variable defines where the logs, file storage, monitor data and secret keys are stored. + - LANGFLOW_CONFIG_DIR=/app/langflow
+ volumes: + - langflow-data:/app/langflow + + + postgres-langflow: + image: postgres:16 + environment: + POSTGRES_USER: ${DB_USERNAME} + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_DB: langflow + ports: + - 5432 + volumes: + - langflow-postgres:/var/lib/postgresql/data + + +volumes: + langflow-postgres: + langflow-data: \ No newline at end of file diff --git a/blueprints/langflow/index.ts b/blueprints/langflow/index.ts new file mode 100644 index 000000000..75f6db580 --- /dev/null +++ b/blueprints/langflow/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const dbPassword = generatePassword(); + const dbUsername = "langflow"; + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 7860, + serviceName: "langflow", + }, + ]; + + const envs = [`DB_PASSWORD=${dbPassword}`, `DB_USERNAME=${dbUsername}`]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/linkwarden/docker-compose.yml b/blueprints/linkwarden/docker-compose.yml new file mode 100644 index 000000000..05ffb8a0a --- /dev/null +++ b/blueprints/linkwarden/docker-compose.yml @@ -0,0 +1,40 @@ +services: + linkwarden: + environment: + - NEXTAUTH_SECRET + - NEXTAUTH_URL + - DATABASE_URL=postgresql://linkwarden:${POSTGRES_PASSWORD}@postgres:5432/linkwarden + restart: unless-stopped + image: ghcr.io/linkwarden/linkwarden:v2.9.3 + ports: + - 3000 + volumes: + - linkwarden-data:/data/data + depends_on: + - postgres + healthcheck: + test: curl --fail http://localhost:3000 || exit 1 + interval: 60s + retries: 2 + start_period: 60s + timeout: 15s + + postgres: + image: postgres:17-alpine + restart: unless-stopped + user: postgres + environment: + POSTGRES_USER: linkwarden + POSTGRES_DB: linkwarden + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + volumes: + - postgres-data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready"] + interval: 10s + timeout: 5s + retries: 5 + +volumes: + linkwarden-data: + postgres-data: diff --git a/blueprints/linkwarden/index.ts b/blueprints/linkwarden/index.ts new file mode 100644 index 000000000..860250356 --- /dev/null +++ b/blueprints/linkwarden/index.ts @@ -0,0 +1,33 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const postgresPassword = generatePassword(); + const nextSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "linkwarden", + }, + ]; + + const envs = [ + `POSTGRES_PASSWORD=${postgresPassword}`, + `NEXTAUTH_SECRET=${nextSecret}`, + `NEXTAUTH_URL=http://${mainDomain}/api/v1/auth`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/listmonk/docker-compose.yml b/blueprints/listmonk/docker-compose.yml new file mode 100644 index 000000000..d9da8b50e --- /dev/null +++ b/blueprints/listmonk/docker-compose.yml @@ -0,0 +1,49 @@ +services: + db: + image: postgres:17-alpine + ports: + - 5432 + + environment: + - POSTGRES_PASSWORD=listmonk + - POSTGRES_USER=listmonk + - POSTGRES_DB=listmonk + restart: unless-stopped + healthcheck: + test: ["CMD-SHELL", "pg_isready -U listmonk"] + interval: 10s + timeout: 5s + retries: 6 + volumes: + - listmonk-data:/var/lib/postgresql/data + 
+ setup: + image: listmonk/listmonk:v4.1.0 + + volumes: + - ../files/config.toml:/listmonk/config.toml + depends_on: + - db + command: + [ + sh, + -c, + "sleep 3 && ./listmonk --install --idempotent --yes --config config.toml", + ] + + app: + restart: unless-stopped + image: listmonk/listmonk:v4.1.0 + environment: + - TZ=Etc/UTC + depends_on: + - db + - setup + volumes: + - ../files/config.toml:/listmonk/config.toml + - listmonk-uploads:/listmonk/uploads + +volumes: + listmonk-uploads: + listmonk-data: + driver: local diff --git a/blueprints/listmonk/index.ts b/blueprints/listmonk/index.ts new file mode 100644 index 000000000..2a25efcaa --- /dev/null +++ b/blueprints/listmonk/index.ts @@ -0,0 +1,52 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 9000, + serviceName: "app", + }, + ]; + + const envs = [ + "# visit the page to setup your super admin user", + "# check config.toml in Advanced / Volumes for more options", + ]; + + const mounts: Template["mounts"] = [ + { + filePath: "config.toml", + content: `[app] +address = "0.0.0.0:9000" + +[db] +host = "db" +port = 5432 +user = "listmonk" +password = "listmonk" +database = "listmonk" + +ssl_mode = "disable" +max_open = 25 +max_idle = 25 +max_lifetime = "300s" + +params = "" +`, + }, + ]; + + return { + envs, + mounts, + domains, + }; +} diff --git a/blueprints/lobe-chat/docker-compose.yml b/blueprints/lobe-chat/docker-compose.yml new file mode 100644 index 000000000..676140903 --- /dev/null +++ b/blueprints/lobe-chat/docker-compose.yml @@ -0,0 +1,12 @@ +version: '3.8' + +services: + lobe-chat: + image: lobehub/lobe-chat:v1.26.1 + restart: always + ports: + - 3210 + environment: + OPENAI_API_KEY: sk-xxxx + OPENAI_PROXY_URL: https://api-proxy.com/v1 + ACCESS_CODE: lobe66 \ No newline at end of file diff --git a/blueprints/lobe-chat/index.ts b/blueprints/lobe-chat/index.ts new file mode 100644 index 000000000..a07a4e09a --- /dev/null +++ b/blueprints/lobe-chat/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3210, + serviceName: "lobe-chat", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/logto/docker-compose.yml b/blueprints/logto/docker-compose.yml new file mode 100644 index 000000000..6f2b920a0 --- /dev/null +++ b/blueprints/logto/docker-compose.yml @@ -0,0 +1,40 @@ +services: + app: + depends_on: + postgres: + condition: service_healthy + image: svhd/logto:1.22.0 + entrypoint: ["sh", "-c", "npm run cli db seed -- --swe && npm start"] + ports: + - 3001 + - 3002 + + environment: + TRUST_PROXY_HEADER: 1 + DB_URL: postgres://logto:${LOGTO_POSTGRES_PASSWORD}@postgres:5432/logto + ENDPOINT: ${LOGTO_ENDPOINT} + ADMIN_ENDPOINT: ${LOGTO_ADMIN_ENDPOINT} + volumes: + - logto-connectors:/etc/logto/packages/core/connectors + postgres: + image: postgres:17-alpine + user: postgres + + environment: + POSTGRES_USER: logto + POSTGRES_PASSWORD: ${LOGTO_POSTGRES_PASSWORD} + volumes: + - postgres-data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready"] + interval: 10s + timeout: 5s + retries: 5 + +networks: + dokploy-network: 
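+    # Pre-existing network managed by Dokploy; its Traefik instance routes the
+    # template's domains to the services over this network.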
+    external: true
+
+volumes:
+  logto-connectors:
+  postgres-data:
diff --git a/blueprints/logto/index.ts b/blueprints/logto/index.ts
new file mode 100644
index 000000000..09b099d85
--- /dev/null
+++ b/blueprints/logto/index.ts
@@ -0,0 +1,37 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generatePassword,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const mainDomain = generateRandomDomain(schema);
+	const adminDomain = generateRandomDomain(schema);
+	const postgresPassword = generatePassword();
+
+	const domains: DomainSchema[] = [
+		{
+			host: mainDomain,
+			port: 3001,
+			serviceName: "app",
+		},
+		{
+			host: adminDomain,
+			port: 3002,
+			serviceName: "app",
+		},
+	];
+
+	const envs = [
+		`LOGTO_ENDPOINT=http://${mainDomain}`,
+		`LOGTO_ADMIN_ENDPOINT=http://${adminDomain}`,
+		`LOGTO_POSTGRES_PASSWORD=${postgresPassword}`,
+	];
+
+	return {
+		domains,
+		envs,
+	};
+}
diff --git a/blueprints/macos/docker-compose.yml b/blueprints/macos/docker-compose.yml
new file mode 100644
index 000000000..585c1bf97
--- /dev/null
+++ b/blueprints/macos/docker-compose.yml
@@ -0,0 +1,16 @@
+services:
+  macos:
+    image: dockurr/macos:1.14
+    volumes:
+      - macos-storage:/storage
+    environment:
+      - VERSION
+    devices:
+      # If 'KVM=N' is uncommented in .env, comment out the device below
+      - /dev/kvm
+    cap_add:
+      - NET_ADMIN
+    stop_grace_period: 2m
+
+volumes:
+  macos-storage:
\ No newline at end of file
diff --git a/blueprints/macos/index.ts b/blueprints/macos/index.ts
new file mode 100644
index 000000000..ebda41065
--- /dev/null
+++ b/blueprints/macos/index.ts
@@ -0,0 +1,33 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const domains: DomainSchema[] = [
+		{
+			host: generateRandomDomain(schema),
+			port: 8006,
+			serviceName: "macos",
+		},
+	];
+
+	const envs = [
+		"# https://github.com/dockur/macos?tab=readme-ov-file#how-do-i-select-the-macos-version",
+		"VERSION=15",
+		"",
+		"# Uncomment this if your host PC/VM does not support hardware virtualization",
+		"# KVM=N",
+		"",
+		"DISK_SIZE=64G",
+		"RAM_SIZE=4G",
+		"CPU_CORES=2",
+	];
+
+	return {
+		domains,
+		envs,
+	};
+}
diff --git a/blueprints/mailpit/docker-compose.yml b/blueprints/mailpit/docker-compose.yml
new file mode 100644
index 000000000..d0dbdb8ec
--- /dev/null
+++ b/blueprints/mailpit/docker-compose.yml
@@ -0,0 +1,25 @@
+services:
+  mailpit:
+    image: axllent/mailpit:v1.22.3
+    restart: unless-stopped
+    ports:
+      - '1025:1025'
+    volumes:
+      - 'mailpit-data:/data'
+    environment:
+      - MP_SMTP_AUTH_ALLOW_INSECURE=true
+      - MP_MAX_MESSAGES=5000
+      - MP_DATABASE=/data/mailpit.db
+      - MP_UI_AUTH=${MP_UI_AUTH}
+      - MP_SMTP_AUTH=${MP_SMTP_AUTH}
+    healthcheck:
+      test:
+        - CMD
+        - /mailpit
+        - readyz
+      interval: 5s
+      timeout: 20s
+      retries: 10
+
+volumes:
+  mailpit-data:
\ No newline at end of file
diff --git a/blueprints/mailpit/index.ts b/blueprints/mailpit/index.ts
new file mode 100644
index 000000000..25f18f7e6
--- /dev/null
+++ b/blueprints/mailpit/index.ts
@@ -0,0 +1,31 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generateBase64,
+	generatePassword,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const domains: DomainSchema[] = [
+		{
+			host: generateRandomDomain(schema),
+			port: 8025,
+			serviceName: "mailpit",
+		},
+	];
+
+	const defaultPassword = generatePassword();
+
+	const envs = [
+		"# 
Uncomment below if you want basic auth on UI and SMTP", + `#MP_UI_AUTH=mailpit:${defaultPassword}`, + `#MP_SMTP_AUTH=mailpit:${defaultPassword}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/maybe/docker-compose.yml b/blueprints/maybe/docker-compose.yml new file mode 100644 index 000000000..db529e0a7 --- /dev/null +++ b/blueprints/maybe/docker-compose.yml @@ -0,0 +1,36 @@ +services: + app: + image: ghcr.io/maybe-finance/maybe:sha-68c570eed8810fd59b5b33cca51bbad5eabb4cb4 + restart: unless-stopped + volumes: + - ../files/uploads:/app/uploads + environment: + DATABASE_URL: postgresql://maybe:maybe@db:5432/maybe + SECRET_KEY_BASE: ${SECRET_KEY_BASE} + SELF_HOSTED: true + SYNTH_API_KEY: ${SYNTH_API_KEY} + RAILS_FORCE_SSL: "false" + RAILS_ASSUME_SSL: "false" + GOOD_JOB_EXECUTION_MODE: async + depends_on: + db: + condition: service_healthy + + db: + image: postgres:16 + restart: always + healthcheck: + test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"] + interval: 5s + timeout: 5s + retries: 5 + + volumes: + - db-data:/var/lib/postgresql/data + environment: + POSTGRES_DB: maybe + POSTGRES_USER: maybe + POSTGRES_PASSWORD: maybe + +volumes: + db-data: diff --git a/blueprints/maybe/index.ts b/blueprints/maybe/index.ts new file mode 100644 index 000000000..5eaf7a811 --- /dev/null +++ b/blueprints/maybe/index.ts @@ -0,0 +1,43 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const secretKeyBase = generateBase64(64); + const synthApiKey = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "app", + }, + ]; + + const envs = [ + `SECRET_KEY_BASE=${secretKeyBase}`, + "SELF_HOSTED=true", + `SYNTH_API_KEY=${synthApiKey}`, + "RAILS_FORCE_SSL=false", + "RAILS_ASSUME_SSL=false", + "GOOD_JOB_EXECUTION_MODE=async", + ]; + + const mounts: Template["mounts"] = [ + { + filePath: "./uploads", + content: "This is where user uploads will be stored", + }, + ]; + + return { + envs, + mounts, + domains, + }; +} diff --git a/blueprints/meilisearch/docker-compose.yml b/blueprints/meilisearch/docker-compose.yml new file mode 100644 index 000000000..ae5ebcb1a --- /dev/null +++ b/blueprints/meilisearch/docker-compose.yml @@ -0,0 +1,14 @@ +version: "3.8" + +services: + meilisearch: + image: getmeili/meilisearch:v1.8.3 + volumes: + - meili_data:/meili_data + environment: + MEILI_MASTER_KEY: ${MEILI_MASTER_KEY} + MEILI_ENV: ${MEILI_ENV} + +volumes: + meili_data: + driver: local diff --git a/blueprints/meilisearch/index.ts b/blueprints/meilisearch/index.ts new file mode 100644 index 000000000..cfb8a9a4b --- /dev/null +++ b/blueprints/meilisearch/index.ts @@ -0,0 +1,26 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const masterKey = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 7700, + serviceName: "meilisearch", + }, + ]; + const envs = ["MEILI_ENV=development", `MEILI_MASTER_KEY=${masterKey}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/metabase/docker-compose.yml b/blueprints/metabase/docker-compose.yml new file mode 100644 index 000000000..43a03987d --- /dev/null +++ 
b/blueprints/metabase/docker-compose.yml @@ -0,0 +1,25 @@ +version: "3.8" +services: + metabase: + image: metabase/metabase:v0.50.8 + volumes: + - /dev/urandom:/dev/random:ro + environment: + MB_DB_TYPE: postgres + MB_DB_DBNAME: metabaseappdb + MB_DB_PORT: 5432 + MB_DB_USER: metabase + MB_DB_PASS: mysecretpassword + MB_DB_HOST: postgres + healthcheck: + test: curl --fail -I http://localhost:3000/api/health || exit 1 + interval: 15s + timeout: 5s + retries: 5 + postgres: + image: postgres:14 + environment: + POSTGRES_USER: metabase + POSTGRES_DB: metabaseappdb + POSTGRES_PASSWORD: mysecretpassword + diff --git a/blueprints/metabase/index.ts b/blueprints/metabase/index.ts new file mode 100644 index 000000000..0a08916e6 --- /dev/null +++ b/blueprints/metabase/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 3000, + serviceName: "metabase", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/minio/docker-compose.yml b/blueprints/minio/docker-compose.yml new file mode 100644 index 000000000..4b24bbcce --- /dev/null +++ b/blueprints/minio/docker-compose.yml @@ -0,0 +1,13 @@ +version: "3.8" +services: + minio: + image: minio/minio + volumes: + - minio-data:/data + environment: + - MINIO_ROOT_USER=minioadmin + - MINIO_ROOT_PASSWORD=minioadmin123 + command: server /data --console-address ":9001" + +volumes: + minio-data: diff --git a/blueprints/minio/index.ts b/blueprints/minio/index.ts new file mode 100644 index 000000000..1345aafdc --- /dev/null +++ b/blueprints/minio/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const apiDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 9001, + serviceName: "minio", + }, + { + host: apiDomain, + port: 9000, + serviceName: "minio", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/n8n/docker-compose.yml b/blueprints/n8n/docker-compose.yml new file mode 100644 index 000000000..d810fa333 --- /dev/null +++ b/blueprints/n8n/docker-compose.yml @@ -0,0 +1,18 @@ +version: "3.8" +services: + n8n: + image: docker.n8n.io/n8nio/n8n:1.70.3 + restart: always + environment: + - N8N_HOST=${N8N_HOST} + - N8N_PORT=${N8N_PORT} + - N8N_PROTOCOL=http + - NODE_ENV=production + - WEBHOOK_URL=https://${N8N_HOST}/ + - GENERIC_TIMEZONE=${GENERIC_TIMEZONE} + - N8N_SECURE_COOKIE=false + volumes: + - n8n_data:/home/node/.n8n + +volumes: + n8n_data: diff --git a/blueprints/n8n/index.ts b/blueprints/n8n/index.ts new file mode 100644 index 000000000..da93c025f --- /dev/null +++ b/blueprints/n8n/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 5678, + serviceName: "n8n", + }, + ]; + const envs = [ + `N8N_HOST=${mainDomain}`, + "N8N_PORT=5678", + "GENERIC_TIMEZONE=Europe/Berlin", + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/nextcloud-aio/docker-compose.yml 
b/blueprints/nextcloud-aio/docker-compose.yml new file mode 100644 index 000000000..1e6d00fe3 --- /dev/null +++ b/blueprints/nextcloud-aio/docker-compose.yml @@ -0,0 +1,36 @@ +services: + nextcloud: + image: nextcloud:30.0.2 + restart: always + + ports: + - 80 + volumes: + - nextcloud_data:/var/www/html + environment: + - NEXTCLOUD_TRUSTED_DOMAINS=${NEXTCLOUD_DOMAIN} + - MYSQL_HOST=nextcloud_db + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + - MYSQL_PASSWORD=${MYSQL_SECRET_PASSWORD} + - OVERWRITEPROTOCOL=https + + nextcloud_db: + image: mariadb + restart: always + + volumes: + - nextcloud_db_data:/var/lib/mysql + environment: + - MYSQL_ROOT_PASSWORD=${MYSQL_SECRET_PASSWORD_ROOT} + - MYSQL_DATABASE=nextcloud + - MYSQL_USER=nextcloud + - MYSQL_PASSWORD=${MYSQL_SECRET_PASSWORD} + +volumes: + nextcloud_data: + nextcloud_db_data: + +networks: + dokploy-network: + external: true diff --git a/blueprints/nextcloud-aio/index.ts b/blueprints/nextcloud-aio/index.ts new file mode 100644 index 000000000..f6bfc893c --- /dev/null +++ b/blueprints/nextcloud-aio/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + const databasePassword = generatePassword(); + const databaseRootPassword = generatePassword(); + const envs = [ + `NEXTCLOUD_DOMAIN=${randomDomain}`, + `MYSQL_SECRET_PASSWORD=${databasePassword}`, + `MYSQL_SECRET_PASSWORD_ROOT=${databaseRootPassword}`, + ]; + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 80, + serviceName: "nextcloud", + }, + ]; + + return { envs, domains }; +} diff --git a/blueprints/nocodb/docker-compose.yml b/blueprints/nocodb/docker-compose.yml new file mode 100644 index 000000000..7c4fd1e95 --- /dev/null +++ b/blueprints/nocodb/docker-compose.yml @@ -0,0 +1,31 @@ +version: "3.8" +services: + nocodb: + image: nocodb/nocodb:0.257.2 + restart: always + environment: + NC_DB: "pg://root_db?u=postgres&p=password&d=root_db" + PORT: ${NOCODB_PORT} + NC_REDIS_URL: ${NC_REDIS_URL} + volumes: + - nc_data:/usr/app/data + + root_db: + image: postgres:17 + restart: always + + environment: + POSTGRES_DB: root_db + POSTGRES_PASSWORD: password + POSTGRES_USER: postgres + healthcheck: + interval: 10s + retries: 10 + test: 'pg_isready -U "$$POSTGRES_USER" -d "$$POSTGRES_DB"' + timeout: 2s + volumes: + - "db_data:/var/lib/postgresql/data" + +volumes: + db_data: {} + nc_data: {} diff --git a/blueprints/nocodb/index.ts b/blueprints/nocodb/index.ts new file mode 100644 index 000000000..60620dbd4 --- /dev/null +++ b/blueprints/nocodb/index.ts @@ -0,0 +1,28 @@ +// EXAMPLE +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + const secretBase = generateBase64(64); + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 8000, + serviceName: "nocodb", + }, + ]; + + const envs = ["NOCODB_PORT=8000", `NC_AUTH_JWT_SECRET=${secretBase}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/odoo/docker-compose.yml b/blueprints/odoo/docker-compose.yml new file mode 100644 index 000000000..7c1c7d3ce --- /dev/null +++ b/blueprints/odoo/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.8" +services: + web: + image: odoo:16.0 + depends_on: + - db + environment: + - 
HOST=db + - USER=odoo + - PASSWORD=odoo + volumes: + - odoo-web-data:/var/lib/odoo + - ../files/config:/etc/odoo + - ../files/addons:/mnt/extra-addons + + db: + image: postgres:13 + + environment: + - POSTGRES_DB=postgres + - POSTGRES_USER=odoo + - POSTGRES_PASSWORD=odoo + volumes: + - odoo-db-data:/var/lib/postgresql/data + +volumes: + odoo-web-data: + odoo-db-data: diff --git a/blueprints/odoo/index.ts b/blueprints/odoo/index.ts new file mode 100644 index 000000000..904293c11 --- /dev/null +++ b/blueprints/odoo/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 8069, + serviceName: "web", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/onedev/docker-compose.yml b/blueprints/onedev/docker-compose.yml new file mode 100644 index 000000000..af4122cf8 --- /dev/null +++ b/blueprints/onedev/docker-compose.yml @@ -0,0 +1,12 @@ +--- +services: + onedev: + image: 1dev/server:11.6.6 + restart: always + + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + - "onedev-data:/opt/onedev" + +volumes: + onedev-data: \ No newline at end of file diff --git a/blueprints/onedev/index.ts b/blueprints/onedev/index.ts new file mode 100644 index 000000000..8017c3514 --- /dev/null +++ b/blueprints/onedev/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 6610, + serviceName: "onedev", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/ontime/docker-compose.yml b/blueprints/ontime/docker-compose.yml new file mode 100644 index 000000000..2c04bcb3f --- /dev/null +++ b/blueprints/ontime/docker-compose.yml @@ -0,0 +1,14 @@ +services: + ontime: + image: getontime/ontime:v3.8.0 + ports: + - 4001 + - 8888 + - 9999 + volumes: + - ontime-data:/data/ + environment: + - TZ + restart: unless-stopped +volumes: + ontime-data: diff --git a/blueprints/ontime/index.ts b/blueprints/ontime/index.ts new file mode 100644 index 000000000..17cac25d9 --- /dev/null +++ b/blueprints/ontime/index.ts @@ -0,0 +1,25 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 4001, + serviceName: "ontime", + }, + ]; + + const envs = ["TZ=UTC"]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/open-webui/docker-compose.yml b/blueprints/open-webui/docker-compose.yml new file mode 100644 index 000000000..ee179721a --- /dev/null +++ b/blueprints/open-webui/docker-compose.yml @@ -0,0 +1,25 @@ +version: "3.8" +services: + ollama: + volumes: + - ollama:/root/.ollama + + pull_policy: always + tty: true + restart: unless-stopped + image: ollama/ollama:${OLLAMA_DOCKER_TAG-latest} + + open-webui: + image: ghcr.io/open-webui/open-webui:${WEBUI_DOCKER_TAG-main} + volumes: + - open-webui:/app/backend/data + depends_on: + - ollama + environment: + - "OLLAMA_BASE_URL=http://ollama:11434" + - "WEBUI_SECRET_KEY=" + restart: unless-stopped + +volumes: + ollama: {} + 
open-webui: {}
diff --git a/blueprints/open-webui/index.ts b/blueprints/open-webui/index.ts
new file mode 100644
index 000000000..0431c2a11
--- /dev/null
+++ b/blueprints/open-webui/index.ts
@@ -0,0 +1,24 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const randomDomain = generateRandomDomain(schema);
+
+	const domains: DomainSchema[] = [
+		{
+			host: randomDomain,
+			port: 8080,
+			serviceName: "open-webui",
+		},
+	];
+	const envs = ["OLLAMA_DOCKER_TAG=0.1.47", "WEBUI_DOCKER_TAG=0.3.7"];
+
+	return {
+		envs,
+		domains,
+	};
+}
diff --git a/blueprints/outline/docker-compose.yml b/blueprints/outline/docker-compose.yml
new file mode 100644
index 000000000..aaf98ac0b
--- /dev/null
+++ b/blueprints/outline/docker-compose.yml
@@ -0,0 +1,57 @@
+services:
+  outline:
+    image: outlinewiki/outline:0.82.0
+    restart: always
+    depends_on:
+      - postgres
+      - redis
+      - dex
+    ports:
+      - 3000
+    environment:
+      NODE_ENV: production
+      URL: ${URL}
+      FORCE_HTTPS: 'false'
+      SECRET_KEY: ${SECRET_KEY}
+      UTILS_SECRET: ${UTILS_SECRET}
+      DATABASE_URL: postgres://outline:${POSTGRES_PASSWORD}@postgres:5432/outline
+      PGSSLMODE: disable
+      REDIS_URL: redis://redis:6379
+      OIDC_CLIENT_ID: outline
+      OIDC_CLIENT_SECRET: ${CLIENT_SECRET}
+      OIDC_AUTH_URI: ${DEX_URL}/auth
+      OIDC_TOKEN_URI: ${DEX_URL}/token
+      OIDC_USERINFO_URI: ${DEX_URL}/userinfo
+
+  dex:
+    image: ghcr.io/dexidp/dex:v2.37.0
+    restart: always
+    volumes:
+      - ../files/etc/dex/config.yaml:/etc/dex/config.yaml
+    command:
+      - dex
+      - serve
+      - /etc/dex/config.yaml
+    ports:
+      - 5556
+
+  postgres:
+    image: postgres:15
+    restart: always
+    environment:
+      POSTGRES_DB: outline
+      POSTGRES_USER: outline
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+    volumes:
+      - postgres-data:/var/lib/postgresql/data
+
+  redis:
+    image: redis:latest
+    restart: always
+    command: redis-server --appendonly yes
+    volumes:
+      - redis-data:/data
+
+volumes:
+  postgres-data:
+  redis-data:
\ No newline at end of file
diff --git a/blueprints/outline/index.ts b/blueprints/outline/index.ts
new file mode 100644
index 000000000..8431e5687
--- /dev/null
+++ b/blueprints/outline/index.ts
@@ -0,0 +1,90 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generateBase64,
+	generatePassword,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const mainDomain = generateRandomDomain(schema);
+	const dexDomain = generateRandomDomain(schema);
+	const SECRET_KEY = generateBase64(32);
+	const UTILS_SECRET = generateBase64(32);
+	const CLIENT_SECRET = generateBase64(32);
+	const POSTGRES_PASSWORD = generatePassword();
+
+	const mainURL = `http://${mainDomain}`;
+	const dexURL = `http://${dexDomain}`;
+
+	const domains: DomainSchema[] = [
+		{
+			host: mainDomain,
+			port: 3000,
+			serviceName: "outline",
+		},
+		{
+			host: dexDomain,
+			port: 5556,
+			serviceName: "dex",
+		},
+	];
+
+	const mounts: Template["mounts"] = [
+		{
+			filePath: "/etc/dex/config.yaml",
+			content: `issuer: ${dexURL}
+
+web:
+  http: 0.0.0.0:5556
+
+storage:
+  type: memory
+
+enablePasswordDB: true
+
+frontend:
+  issuer: Outline
+
+logger:
+  level: debug
+
+staticPasswords:
+  - email: "admin@example.com"
+    # bcrypt hash of the string "password": $(echo password | htpasswd -BinC 10 admin | cut -d: -f2)
+    hash: "$2y$10$jsRWHw54uxTUIfhjgUrB9u8HSzPk7TUuQri9sXZrKzRXcScvwYor."
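+    # Default Dex login seeded for Outline: admin@example.com with password
+    # "password" (the bcrypt hash above). Change it before exposing the instance.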
+    username: "admin"
+    userID: "1"
+
+
+oauth2:
+  skipApprovalScreen: true
+  alwaysShowLoginScreen: false
+  passwordConnector: local
+
+staticClients:
+  - id: "outline"
+    redirectURIs:
+      - ${mainURL}/auth/oidc.callback
+    name: "Outline"
+    secret: "${CLIENT_SECRET}"`,
+		},
+	];
+
+	const envs = [
+		`URL=${mainURL}`,
+		`DEX_URL=${dexURL}`,
+		`DOMAIN_NAME=${mainDomain}`,
+		`POSTGRES_PASSWORD=${POSTGRES_PASSWORD}`,
+		`SECRET_KEY=${SECRET_KEY}`,
+		`UTILS_SECRET=${UTILS_SECRET}`,
+		`CLIENT_SECRET=${CLIENT_SECRET}`,
+	];
+
+	return {
+		domains,
+		envs,
+		mounts,
+	};
+}
diff --git a/blueprints/penpot/docker-compose.yml b/blueprints/penpot/docker-compose.yml
new file mode 100644
index 000000000..3e0efe915
--- /dev/null
+++ b/blueprints/penpot/docker-compose.yml
@@ -0,0 +1,207 @@
+## Common flags:
+# demo-users
+# email-verification
+# log-emails
+# log-invitation-tokens
+# login-with-github
+# login-with-gitlab
+# login-with-google
+# login-with-ldap
+# login-with-oidc
+# login-with-password
+# prepl-server
+# registration
+# secure-session-cookies
+# smtp
+# smtp-debug
+# telemetry
+# webhooks
+##
+## You can read more about all available flags and other
+## environment variables here:
+## https://help.penpot.app/technical-guide/configuration/#advanced-configuration
+#
+# WARNING: if you're exposing Penpot to the internet, you should remove the flags
+# 'disable-secure-session-cookies' and 'disable-email-verification'

+volumes:
+  penpot_postgres_v15:
+  penpot_assets:
+  penpot_traefik:
+  # penpot_minio:
+
+services:
+
+  penpot-frontend:
+    image: "penpotapp/frontend:2.3.2"
+    restart: always
+    ports:
+      - 8080
+      - 9001
+
+    volumes:
+      - penpot_assets:/opt/data/assets
+
+    depends_on:
+      - penpot-backend
+      - penpot-exporter
+
+    environment:
+      PENPOT_FLAGS: disable-email-verification enable-smtp enable-prepl-server disable-secure-session-cookies
+
+  penpot-backend:
+    image: "penpotapp/backend:2.3.2"
+    restart: always
+
+    volumes:
+      - penpot_assets:/opt/data/assets
+
+    depends_on:
+      - penpot-postgres
+      - penpot-redis
+
+    ## Configuration environment variables for the backend
+    ## container.
+
+    environment:
+      PENPOT_PUBLIC_URI: http://${DOMAIN_NAME}
+      PENPOT_FLAGS: disable-email-verification enable-smtp enable-prepl-server disable-secure-session-cookies
+
+      ## Penpot SECRET KEY. It serves as a master key from which other keys for subsystems
+      ## (e.g. http sessions, or invitations) are derived.
+      ##
+      ## If you leave it commented, all created sessions and invitations will
+      ## become invalid on container restart.
+      ##
+      ## If you are going to uncomment this, we recommend using a truly randomly generated
+      ## 512-bit base64-encoded string here. You can generate one with:
+      ##
+      ## python3 -c "import secrets; print(secrets.token_urlsafe(64))"
+
+      # PENPOT_SECRET_KEY: my-insecure-key
+
+      ## The PREPL host. Mainly used for external programmatic access to the penpot backend
+      ## (example: admin). By default it will listen on `localhost`, but if you are going to use
+      ## the `admin`, you will need to uncomment this and set the host to `0.0.0.0`.
+
+      # PENPOT_PREPL_HOST: 0.0.0.0
+
+      ## Database connection parameters. Don't touch them unless you are using custom
+      ## postgresql connection parameters.
+
+      PENPOT_DATABASE_URI: postgresql://penpot-postgres/penpot
+      PENPOT_DATABASE_USERNAME: penpot
+      PENPOT_DATABASE_PASSWORD: penpot
+
+      ## Redis is used for the websockets notifications. Don't touch unless the redis
+      ## container has different parameters or a different name.
+
+      PENPOT_REDIS_URI: redis://penpot-redis/0
+
+      ## Default configuration for assets storage: filesystem based, with all files
+      ## stored in a docker volume.
+
+      PENPOT_ASSETS_STORAGE_BACKEND: assets-fs
+      PENPOT_STORAGE_ASSETS_FS_DIRECTORY: /opt/data/assets
+
+      ## It can also be configured to use an S3-compatible storage
+      ## service like MinIO. Look below for the minio service setup.
+
+      # AWS_ACCESS_KEY_ID:
+      # AWS_SECRET_ACCESS_KEY:
+      # PENPOT_ASSETS_STORAGE_BACKEND: assets-s3
+      # PENPOT_STORAGE_ASSETS_S3_ENDPOINT: http://penpot-minio:9000
+      # PENPOT_STORAGE_ASSETS_S3_BUCKET:
+
+      ## Telemetry. When enabled, a periodical process will send anonymous data about this
+      ## instance. Telemetry data will enable us to learn how the application is used,
+      ## based on real scenarios. If you want to help us, please leave it enabled. You can
+      ## audit what data we send with the code available on github.
+
+      PENPOT_TELEMETRY_ENABLED: true
+
+      ## Example SMTP/Email configuration. By default, emails are sent to the mailcatch
+      ## service, but for production usage it is recommended to set up a real SMTP
+      ## provider. Emails are used to confirm user registrations & invitations. Look below
+      ## at how the mailcatch service is configured.
+
+      PENPOT_SMTP_DEFAULT_FROM: no-reply@example.com
+      PENPOT_SMTP_DEFAULT_REPLY_TO: no-reply@example.com
+      PENPOT_SMTP_HOST: penpot-mailcatch
+      PENPOT_SMTP_PORT: 1025
+      PENPOT_SMTP_USERNAME:
+      PENPOT_SMTP_PASSWORD:
+      PENPOT_SMTP_TLS: false
+      PENPOT_SMTP_SSL: false
+
+  penpot-exporter:
+    image: "penpotapp/exporter:2.3.2"
+    restart: always
+
+    environment:
+      # Don't touch it; this uses an internal docker network to
+      # communicate with the frontend.
+      PENPOT_PUBLIC_URI: http://penpot-frontend
+
+      ## Redis is used for the websockets notifications.
+      PENPOT_REDIS_URI: redis://penpot-redis/0
+
+  penpot-postgres:
+    image: "postgres:15"
+    restart: always
+    stop_signal: SIGINT
+
+    volumes:
+      - penpot_postgres_v15:/var/lib/postgresql/data
+
+    environment:
+      - POSTGRES_INITDB_ARGS=--data-checksums
+      - POSTGRES_DB=penpot
+      - POSTGRES_USER=penpot
+      - POSTGRES_PASSWORD=penpot
+
+  penpot-redis:
+    image: redis:7.2
+    restart: always
+
+  ## A mailcatch service, used as a temporary SMTP server. You can access it over HTTP on
+  ## port 1080 to read all emails the penpot platform has sent. It should only be used as a
+  ## temporary solution while no real SMTP provider is configured.
+
+  penpot-mailcatch:
+    image: sj26/mailcatcher:latest
+    restart: always
+    expose:
+      - '1025'
+    ports:
+      - 1080
+
+  ## Example configuration of MinIO (an S3-compatible object storage service). If you don't
+  ## have a preference, just use the filesystem backend; this is here only for completeness.
+ + # minio: + # image: "minio/minio:latest" + # command: minio server /mnt/data --console-address ":9001" + # restart: always + # + # volumes: + # - "penpot_minio:/mnt/data" + # + # environment: + # - MINIO_ROOT_USER=minioadmin + # - MINIO_ROOT_PASSWORD=minioadmin + # + # ports: + # - 9000:9000 + # - 9001:9001 + + diff --git a/blueprints/penpot/index.ts b/blueprints/penpot/index.ts new file mode 100644 index 000000000..a3e90e8ae --- /dev/null +++ b/blueprints/penpot/index.ts @@ -0,0 +1,25 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "penpot-frontend", + }, + ]; + + const envs = [`DOMAIN_NAME=${mainDomain}`]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/peppermint/docker-compose.yml b/blueprints/peppermint/docker-compose.yml new file mode 100644 index 000000000..06fb46c66 --- /dev/null +++ b/blueprints/peppermint/docker-compose.yml @@ -0,0 +1,38 @@ +version: "3.8" + +services: + peppermint-postgres: + image: postgres:latest + restart: always + + volumes: + - peppermint-postgres-data:/var/lib/postgresql/data + environment: + POSTGRES_USER: peppermint + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_DB: peppermint + healthcheck: + test: ["CMD-SHELL", "pg_isready -U peppermint"] + interval: 10s + timeout: 5s + retries: 5 + + peppermint-app: + image: pepperlabs/peppermint:latest + restart: always + + depends_on: + peppermint-postgres: + condition: service_healthy + environment: + DB_USERNAME: "peppermint" + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_HOST: "peppermint-postgres" + SECRET: ${SECRET} + +networks: + dokploy-network: + external: true + +volumes: + peppermint-postgres-data: \ No newline at end of file diff --git a/blueprints/peppermint/index.ts b/blueprints/peppermint/index.ts new file mode 100644 index 000000000..de63f8452 --- /dev/null +++ b/blueprints/peppermint/index.ts @@ -0,0 +1,40 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const apiDomain = generateRandomDomain(schema); + const postgresPassword = generatePassword(); + const secret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "peppermint-app", + }, + { + host: apiDomain, + port: 5003, + serviceName: "peppermint-app", + }, + ]; + + const envs = [ + `MAIN_DOMAIN=${mainDomain}`, + `API_DOMAIN=${apiDomain}`, + `POSTGRES_PASSWORD=${postgresPassword}`, + `SECRET=${secret}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/photoprism/docker-compose.yml b/blueprints/photoprism/docker-compose.yml new file mode 100644 index 000000000..56793dbd3 --- /dev/null +++ b/blueprints/photoprism/docker-compose.yml @@ -0,0 +1,76 @@ +services: + photoprism: + image: photoprism/photoprism:latest + stop_grace_period: 10s + depends_on: + - mariadb + security_opt: + - seccomp:unconfined + - apparmor:unconfined + + environment: + PHOTOPRISM_ADMIN_USER: "admin" + PHOTOPRISM_ADMIN_PASSWORD: ${ADMIN_PASSWORD} + PHOTOPRISM_AUTH_MODE: "password" + PHOTOPRISM_SITE_URL: "http://localhost:2342/" + PHOTOPRISM_DISABLE_TLS: "false" + PHOTOPRISM_DEFAULT_TLS: "false" + PHOTOPRISM_ORIGINALS_LIMIT: 
5000 # file size limit for originals in MB (increase for high-res video)
+      PHOTOPRISM_HTTP_COMPRESSION: "gzip"
+      PHOTOPRISM_LOG_LEVEL: "info" # log level: trace, debug, info, warning, error, fatal, or panic
+      PHOTOPRISM_READONLY: "false"
+      PHOTOPRISM_EXPERIMENTAL: "false"
+      PHOTOPRISM_DISABLE_CHOWN: "false"
+      PHOTOPRISM_DISABLE_WEBDAV: "false"
+      PHOTOPRISM_DISABLE_SETTINGS: "false"
+      PHOTOPRISM_DISABLE_TENSORFLOW: "false"
+      PHOTOPRISM_DISABLE_FACES: "false"
+      PHOTOPRISM_DISABLE_CLASSIFICATION: "false"
+      PHOTOPRISM_DISABLE_VECTORS: "false"
+      PHOTOPRISM_DISABLE_RAW: "false"
+      PHOTOPRISM_RAW_PRESETS: "false"
+      PHOTOPRISM_SIDECAR_YAML: "true"
+      PHOTOPRISM_BACKUP_ALBUMS: "true"
+      PHOTOPRISM_BACKUP_DATABASE: "true"
+      PHOTOPRISM_BACKUP_SCHEDULE: "daily"
+      PHOTOPRISM_INDEX_SCHEDULE: ""
+      PHOTOPRISM_AUTO_INDEX: 300
+      PHOTOPRISM_AUTO_IMPORT: -1
+      PHOTOPRISM_DETECT_NSFW: "false"
+      PHOTOPRISM_UPLOAD_NSFW: "true"
+      PHOTOPRISM_DATABASE_DRIVER: "mysql"
+      PHOTOPRISM_DATABASE_SERVER: "mariadb:3306"
+      PHOTOPRISM_DATABASE_NAME: "photoprism"
+      PHOTOPRISM_DATABASE_USER: "photoprism"
+      PHOTOPRISM_DATABASE_PASSWORD: "insecure"
+      PHOTOPRISM_SITE_CAPTION: "AI-Powered Photos App"
+      PHOTOPRISM_SITE_DESCRIPTION: ""
+      PHOTOPRISM_SITE_AUTHOR: ""
+    working_dir:
+      "/photoprism"
+    volumes:
+      - pictures:/photoprism/originals
+      - storage-data:/photoprism/storage
+
+  mariadb:
+    image: mariadb:11
+    restart: unless-stopped
+    stop_grace_period: 5s
+
+    security_opt:
+      - seccomp:unconfined
+      - apparmor:unconfined
+    volumes:
+      - db-data:/var/lib/mysql
+    environment:
+      MARIADB_AUTO_UPGRADE: "1"
+      MARIADB_INITDB_SKIP_TZINFO: "1"
+      MARIADB_DATABASE: "photoprism"
+      MARIADB_USER: "photoprism"
+      MARIADB_PASSWORD: "insecure"
+      MARIADB_ROOT_PASSWORD: "insecure"
+
+volumes:
+  db-data:
+  storage-data:
+  pictures:
\ No newline at end of file
diff --git a/blueprints/photoprism/index.ts b/blueprints/photoprism/index.ts
new file mode 100644
index 000000000..4a103a624
--- /dev/null
+++ b/blueprints/photoprism/index.ts
@@ -0,0 +1,30 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generatePassword,
+	generateRandomDomain,
+} from "../utils";
+
+export function generate(schema: Schema): Template {
+	const randomDomain = generateRandomDomain(schema);
+	const randomPassword = generatePassword();
+
+	const domains: DomainSchema[] = [
+		{
+			host: randomDomain,
+			port: 2342,
+			serviceName: "photoprism",
+		},
+	];
+
+	const envs = [
+		`BASE_URL=http://${randomDomain}`,
+		`ADMIN_PASSWORD=${randomPassword}`,
+	];
+
+	return {
+		envs,
+		domains,
+	};
+}
diff --git a/blueprints/phpmyadmin/docker-compose.yml b/blueprints/phpmyadmin/docker-compose.yml
new file mode 100644
index 000000000..91674e87c
--- /dev/null
+++ b/blueprints/phpmyadmin/docker-compose.yml
@@ -0,0 +1,27 @@
+version: "3.8"
+
+services:
+  db:
+    image: mysql:5.7
+    environment:
+      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
+      MYSQL_DATABASE: ${MYSQL_DATABASE}
+      MYSQL_USER: ${MYSQL_USER}
+      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
+    volumes:
+      - db_data:/var/lib/mysql
+
+
+  phpmyadmin:
+    image: phpmyadmin/phpmyadmin:5.2.1
+    environment:
+      PMA_HOST: db
+      PMA_USER: ${MYSQL_USER}
+      PMA_PASSWORD: ${MYSQL_PASSWORD}
+      PMA_ARBITRARY: 1
+    depends_on:
+      - db
+
+volumes:
+  db_data:
+    driver: local
diff --git a/blueprints/phpmyadmin/index.ts b/blueprints/phpmyadmin/index.ts
new file mode 100644
index 000000000..e1c976b9d
--- /dev/null
+++ b/blueprints/phpmyadmin/index.ts
@@ -0,0 +1,32 @@
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generatePassword,
+	generateRandomDomain,
+}
from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const rootPassword = generatePassword(32); + const password = generatePassword(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "phpmyadmin", + }, + ]; + const envs = [ + `MYSQL_ROOT_PASSWORD=${rootPassword}`, + "MYSQL_DATABASE=mysql", + "MYSQL_USER=phpmyadmin", + `MYSQL_PASSWORD=${password}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/pocket-id/docker-compose.yml b/blueprints/pocket-id/docker-compose.yml new file mode 100644 index 000000000..f93851430 --- /dev/null +++ b/blueprints/pocket-id/docker-compose.yml @@ -0,0 +1,21 @@ +services: + pocket-id: + image: ghcr.io/pocket-id/pocket-id:v0.35.1 + restart: unless-stopped + environment: + - PUBLIC_UI_CONFIG_DISABLED + - PUBLIC_APP_URL + - TRUST_PROXY + ports: + - 80 + volumes: + - pocket-id-data:/app/backend/data + healthcheck: + test: "curl -f http://localhost/health" + interval: 1m30s + timeout: 5s + retries: 2 + start_period: 10s + +volumes: + pocket-id-data: diff --git a/blueprints/pocket-id/index.ts b/blueprints/pocket-id/index.ts new file mode 100644 index 000000000..9a9faa2a3 --- /dev/null +++ b/blueprints/pocket-id/index.ts @@ -0,0 +1,29 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 80, + serviceName: "pocket-id", + }, + ]; + + const envs = [ + "PUBLIC_UI_CONFIG_DISABLED=false", + `PUBLIC_APP_URL=http://${mainDomain}`, + "TRUST_PROXY=true", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/portainer/docker-compose.yml b/blueprints/portainer/docker-compose.yml new file mode 100644 index 000000000..19e67a3e5 --- /dev/null +++ b/blueprints/portainer/docker-compose.yml @@ -0,0 +1,29 @@ +version: '3.8' + +services: + agent: + image: portainer/agent + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + + + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + + portainer: + image: portainer/portainer-ce + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - portainer-data:/data + deploy: + mode: replicated + placement: + constraints: [node.role == manager] + + +volumes: + portainer-data: + \ No newline at end of file diff --git a/blueprints/portainer/index.ts b/blueprints/portainer/index.ts new file mode 100644 index 000000000..7775a0ed8 --- /dev/null +++ b/blueprints/portainer/index.ts @@ -0,0 +1,19 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 9000, + serviceName: "portainer", + }, + ]; + return { + domains, + }; +} diff --git a/blueprints/postiz/docker-compose.yml b/blueprints/postiz/docker-compose.yml new file mode 100644 index 000000000..cd06e7952 --- /dev/null +++ b/blueprints/postiz/docker-compose.yml @@ -0,0 +1,65 @@ +version: "3.8" + +services: + postiz-app: + image: ghcr.io/gitroomhq/postiz-app:latest + restart: always + + environment: + MAIN_URL: "https://${POSTIZ_HOST}" + FRONTEND_URL: "https://${POSTIZ_HOST}" + NEXT_PUBLIC_BACKEND_URL: "https://${POSTIZ_HOST}/api" + JWT_SECRET: 
${JWT_SECRET} + DATABASE_URL: "postgresql://${DB_USER}:${DB_PASSWORD}@postiz-postgres:5432/${DB_NAME}" + REDIS_URL: "redis://postiz-redis:6379" + BACKEND_INTERNAL_URL: "http://localhost:3000" + IS_GENERAL: "true" + STORAGE_PROVIDER: "local" + UPLOAD_DIRECTORY: "/uploads" + NEXT_PUBLIC_UPLOAD_DIRECTORY: "/uploads" + volumes: + - postiz-config:/config/ + - postiz-uploads:/uploads/ + depends_on: + postiz-postgres: + condition: service_healthy + postiz-redis: + condition: service_healthy + + postiz-postgres: + image: postgres:17-alpine + restart: always + + environment: + POSTGRES_PASSWORD: ${DB_PASSWORD} + POSTGRES_USER: ${DB_USER} + POSTGRES_DB: ${DB_NAME} + volumes: + - postiz-postgres-data:/var/lib/postgresql/data + healthcheck: + test: pg_isready -U ${DB_USER} -d ${DB_NAME} + interval: 10s + timeout: 3s + retries: 3 + + postiz-redis: + image: redis:7.2 + restart: always + + healthcheck: + test: redis-cli ping + interval: 10s + timeout: 3s + retries: 3 + volumes: + - postiz-redis-data:/data + +networks: + dokploy-network: + external: true + +volumes: + postiz-postgres-data: + postiz-redis-data: + postiz-config: + postiz-uploads: \ No newline at end of file diff --git a/blueprints/postiz/index.ts b/blueprints/postiz/index.ts new file mode 100644 index 000000000..a05d2d940 --- /dev/null +++ b/blueprints/postiz/index.ts @@ -0,0 +1,37 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const dbPassword = generatePassword(); + const dbUser = "postiz"; + const dbName = "postiz"; + const jwtSecret = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 5000, + serviceName: "postiz-app", + }, + ]; + + const envs = [ + `POSTIZ_HOST=${mainDomain}`, + `DB_PASSWORD=${dbPassword}`, + `DB_USER=${dbUser}`, + `DB_NAME=${dbName}`, + `JWT_SECRET=${jwtSecret}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/registry/docker-compose.yml b/blueprints/registry/docker-compose.yml new file mode 100644 index 000000000..08c5c3688 --- /dev/null +++ b/blueprints/registry/docker-compose.yml @@ -0,0 +1,19 @@ +services: + registry: + restart: always + image: registry:2 + ports: + - 5000 + volumes: + - ../files/auth/registry.password:/auth/registry.password + - registry-data:/var/lib/registry + environment: + REGISTRY_STORAGE_DELETE_ENABLED: true + REGISTRY_HEALTH_STORAGEDRIVER_ENABLED: false + REGISTRY_HTTP_SECRET: ${REGISTRY_HTTP_SECRET} + REGISTRY_AUTH: htpasswd + REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm + REGISTRY_AUTH_HTPASSWD_PATH: /auth/registry.password + +volumes: + registry-data: \ No newline at end of file diff --git a/blueprints/registry/index.ts b/blueprints/registry/index.ts new file mode 100644 index 000000000..81965e6e2 --- /dev/null +++ b/blueprints/registry/index.ts @@ -0,0 +1,35 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const domains: DomainSchema[] = [ + { + host: generateRandomDomain(schema), + port: 5000, + serviceName: "registry", + }, + ]; + + const registryHttpSecret = generatePassword(30); + + const envs = [`REGISTRY_HTTP_SECRET=${registryHttpSecret}`]; + + const mounts: Template["mounts"] = [ + { + filePath: "/auth/registry.password", + content: + "# from: docker run --rm --entrypoint 
htpasswd httpd:2 -Bbn docker password\ndocker:$2y$10$qWZoWev/u5PV7WneFoRAMuoGpRcAQOgUuIIdLnU7pJXogrBSY23/2\n", + }, + ]; + + return { + domains, + envs, + mounts, + }; +} diff --git a/blueprints/rocketchat/docker-compose.yml b/blueprints/rocketchat/docker-compose.yml new file mode 100644 index 000000000..5119f5a4e --- /dev/null +++ b/blueprints/rocketchat/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3.8" +services: + rocketchat: + image: registry.rocket.chat/rocketchat/rocket.chat:6.9.2 + restart: always + environment: + MONGO_URL: "mongodb://mongodb:27017/rocketchat?replicaSet=rs0" + MONGO_OPLOG_URL: "mongodb://mongodb:27017/local?replicaSet=rs0" + ROOT_URL: ${ROOT_URL:-http://${ROCKETCHAT_HOST}:${ROCKETCHAT_PORT}} + PORT: ${ROCKETCHAT_PORT} + DEPLOY_METHOD: docker + DEPLOY_PLATFORM: + REG_TOKEN: + depends_on: + - mongodb + + mongodb: + image: docker.io/bitnami/mongodb:5.0 + restart: always + volumes: + - mongodb_data:/bitnami/mongodb + environment: + MONGODB_REPLICA_SET_MODE: primary + MONGODB_REPLICA_SET_NAME: rs0 + MONGODB_PORT_NUMBER: 27017 + MONGODB_INITIAL_PRIMARY_HOST: mongodb + MONGODB_INITIAL_PRIMARY_PORT_NUMBER: 27017 + MONGODB_ADVERTISED_HOSTNAME: mongodb + MONGODB_ENABLE_JOURNAL: true + ALLOW_EMPTY_PASSWORD: yes + + +volumes: + mongodb_data: { driver: local } diff --git a/blueprints/rocketchat/index.ts b/blueprints/rocketchat/index.ts new file mode 100644 index 000000000..0c10307a2 --- /dev/null +++ b/blueprints/rocketchat/index.ts @@ -0,0 +1,25 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 3000, + serviceName: "rocketchat", + }, + ]; + + const envs = [`ROCKETCHAT_HOST=${mainDomain}`, "ROCKETCHAT_PORT=3000"]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/roundcube/docker-compose.yml b/blueprints/roundcube/docker-compose.yml new file mode 100644 index 000000000..e5ba4a8b1 --- /dev/null +++ b/blueprints/roundcube/docker-compose.yml @@ -0,0 +1,16 @@ +services: + roundcubemail: + image: roundcube/roundcubemail:1.6.9-apache + volumes: + - ./www:/var/www/html + - ./db/sqlite:/var/roundcube/db + environment: + - ROUNDCUBEMAIL_DB_TYPE=sqlite + - ROUNDCUBEMAIL_SKIN=elastic + - ROUNDCUBEMAIL_DEFAULT_HOST=${DEFAULT_HOST} + - ROUNDCUBEMAIL_SMTP_SERVER=${SMTP_SERVER} + + +networks: + dokploy-network: + external: true diff --git a/blueprints/roundcube/index.ts b/blueprints/roundcube/index.ts new file mode 100644 index 000000000..8df8c743c --- /dev/null +++ b/blueprints/roundcube/index.ts @@ -0,0 +1,24 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + const envs = [ + "DEFAULT_HOST=tls://mail.example.com", + "SMTP_SERVER=tls://mail.example.com", + ]; + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 80, + serviceName: "roundcubemail", + }, + ]; + + return { envs, domains }; +} diff --git a/blueprints/ryot/docker-compose.yml b/blueprints/ryot/docker-compose.yml new file mode 100644 index 000000000..09a727071 --- /dev/null +++ b/blueprints/ryot/docker-compose.yml @@ -0,0 +1,37 @@ +version: '3.7' + +services: + ryot-app: + image: ignisda/ryot:v7.10 + + environment: + - DATABASE_URL=postgres://postgres:${POSTGRES_PASSWORD}@ryot-db:5432/postgres + - 
SERVER_ADMIN_ACCESS_TOKEN=${ADMIN_ACCESS_TOKEN} + - TZ=UTC + # Optional: Uncomment and set your pro key if you have one + # - SERVER_PRO_KEY=${SERVER_PRO_KEY} + depends_on: + ryot-db: + condition: service_healthy + restart: always + pull_policy: always + + ryot-db: + image: postgres:16-alpine + + volumes: + - ryot-postgres-data:/var/lib/postgresql/data + environment: + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - POSTGRES_USER=postgres + - POSTGRES_DB=postgres + - TZ=UTC + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + +volumes: + ryot-postgres-data: \ No newline at end of file diff --git a/blueprints/ryot/index.ts b/blueprints/ryot/index.ts new file mode 100644 index 000000000..1d8d5ce2c --- /dev/null +++ b/blueprints/ryot/index.ts @@ -0,0 +1,34 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateBase64, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const postgresPassword = generatePassword(); + const adminAccessToken = generateBase64(32); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8000, + serviceName: "ryot-app", + }, + ]; + + const envs = [ + `POSTGRES_PASSWORD=${postgresPassword}`, + `ADMIN_ACCESS_TOKEN=${adminAccessToken}`, + "# Optional: Uncomment and set your pro key if you have one", + "# SERVER_PRO_KEY=your_pro_key_here", + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/shlink/docker-compose.yml b/blueprints/shlink/docker-compose.yml new file mode 100644 index 000000000..6d15a26d8 --- /dev/null +++ b/blueprints/shlink/docker-compose.yml @@ -0,0 +1,29 @@ +services: + shlink: + image: shlinkio/shlink:stable + environment: + - INITIAL_API_KEY=${INITIAL_API_KEY} + - DEFAULT_DOMAIN=${DEFAULT_DOMAIN} + # Note: you should also update SHLINK_SERVER_URL in the shlink-web service. + - IS_HTTPS_ENABLED=false + volumes: + - shlink-data:/etc/shlink/data + healthcheck: + test: ["CMD", "curl", "-f", "http://127.0.0.1:8080/rest/v3/health"] + interval: 30s + timeout: 10s + retries: 3 + shlink-web: + image: shlinkio/shlink-web-client + environment: + - SHLINK_SERVER_API_KEY=${INITIAL_API_KEY} + # Note: if you've set IS_HTTPS_ENABLED=true, change http to https. 
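+      # The web client runs in the user's browser, so this must be the public
+      # Shlink URL (DEFAULT_DOMAIN), not the internal service name.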
+ - SHLINK_SERVER_URL=http://${DEFAULT_DOMAIN} + healthcheck: + test: ["CMD", "curl", "-f", "http://127.0.0.1:8080"] + interval: 30s + timeout: 10s + retries: 3 + +volumes: + shlink-data: diff --git a/blueprints/shlink/index.ts b/blueprints/shlink/index.ts new file mode 100644 index 000000000..1e456e1c2 --- /dev/null +++ b/blueprints/shlink/index.ts @@ -0,0 +1,35 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const defaultDomain = generateRandomDomain(schema); + const initialApiKey = generatePassword(30); + + const domains: DomainSchema[] = [ + { + host: `web-${defaultDomain}`, + port: 8080, + serviceName: "shlink-web", + }, + { + host: defaultDomain, + port: 8080, + serviceName: "shlink", + }, + ]; + + const envs = [ + `INITIAL_API_KEY=${initialApiKey}`, + `DEFAULT_DOMAIN=${defaultDomain}`, + ]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/slash/docker-compose.yml b/blueprints/slash/docker-compose.yml new file mode 100644 index 000000000..ee6cdf895 --- /dev/null +++ b/blueprints/slash/docker-compose.yml @@ -0,0 +1,39 @@ +version: "3.8" + +services: + slash-app: + image: yourselfhosted/slash:latest + + volumes: + - slash-app-data:/var/opt/slash + environment: + - SLASH_DRIVER=postgres + - SLASH_DSN=postgresql://${DB_USER}:${DB_PASSWORD}@slash-postgres:5432/${DB_NAME}?sslmode=disable + depends_on: + slash-postgres: + condition: service_healthy + restart: unless-stopped + + slash-postgres: + image: postgres:16-alpine + + volumes: + - slash-postgres-data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${DB_USER} + - POSTGRES_PASSWORD=${DB_PASSWORD} + - POSTGRES_DB=${DB_NAME} + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"] + interval: 10s + timeout: 5s + retries: 5 + restart: unless-stopped + +networks: + dokploy-network: + external: true + +volumes: + slash-app-data: + slash-postgres-data: \ No newline at end of file diff --git a/blueprints/slash/index.ts b/blueprints/slash/index.ts new file mode 100644 index 000000000..ba614fd82 --- /dev/null +++ b/blueprints/slash/index.ts @@ -0,0 +1,33 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const dbPassword = generatePassword(); + const dbUser = "slash"; + const dbName = "slash"; + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 5231, + serviceName: "slash-app", + }, + ]; + + const envs = [ + `DB_USER=${dbUser}`, + `DB_PASSWORD=${dbPassword}`, + `DB_NAME=${dbName}`, + ]; + + return { + domains, + envs, + }; +} diff --git a/blueprints/soketi/docker-compose.yml b/blueprints/soketi/docker-compose.yml new file mode 100644 index 000000000..d38cbb086 --- /dev/null +++ b/blueprints/soketi/docker-compose.yml @@ -0,0 +1,11 @@ +version: "3" + +services: + soketi: + image: quay.io/soketi/soketi:1.6.1-16-debian + environment: + SOKETI_DEBUG: "1" + SOKETI_HOST: "0.0.0.0" + SOKETI_PORT: "6001" + SOKETI_METRICS_SERVER_PORT: "9601" + restart: unless-stopped diff --git a/blueprints/soketi/index.ts b/blueprints/soketi/index.ts new file mode 100644 index 000000000..47aa461df --- /dev/null +++ b/blueprints/soketi/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function 
generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + const metricsDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 6001, + serviceName: "soketi", + }, + { + host: metricsDomain, + port: 9601, + serviceName: "soketi", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/spacedrive/docker-compose.yml b/blueprints/spacedrive/docker-compose.yml new file mode 100644 index 000000000..b98d55abf --- /dev/null +++ b/blueprints/spacedrive/docker-compose.yml @@ -0,0 +1,9 @@ +services: + server: + image: ghcr.io/spacedriveapp/spacedrive/server:latest + ports: + - 8080 + environment: + - SD_AUTH=${SD_USERNAME}:${SD_PASSWORD} + volumes: + - /var/spacedrive:/var/spacedrive diff --git a/blueprints/spacedrive/index.ts b/blueprints/spacedrive/index.ts new file mode 100644 index 000000000..15db4b198 --- /dev/null +++ b/blueprints/spacedrive/index.ts @@ -0,0 +1,28 @@ +import { + type DomainSchema, + type Schema, + type Template, + generatePassword, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const randomDomain = generateRandomDomain(schema); + const secretKey = generatePassword(); + const randomUsername = "admin"; // Default username + + const domains: DomainSchema[] = [ + { + host: randomDomain, + port: 8080, + serviceName: "server", + }, + ]; + + const envs = [`SD_USERNAME=${randomUsername}`, `SD_PASSWORD=${secretKey}`]; + + return { + envs, + domains, + }; +} diff --git a/blueprints/stirling/docker-compose.yml b/blueprints/stirling/docker-compose.yml new file mode 100644 index 000000000..27bd01216 --- /dev/null +++ b/blueprints/stirling/docker-compose.yml @@ -0,0 +1,22 @@ +services: + stirling-pdf: + image: frooodle/s-pdf:latest + ports: + - 8080 + volumes: + - stirling_pdf_trainingdata:/usr/share/tessdata + - stirling_pdf_extraconfigs:/configs + - stirling_pdf_customfiles:/customFiles/ + - stirling_pdf_logs:/logs/ + - stirling_pdf_pipeline:/pipeline/ + environment: + - DOCKER_ENABLE_SECURITY=false + - INSTALL_BOOK_AND_ADVANCED_HTML_OPS=false + - LANGS=en_GB + +volumes: + stirling_pdf_trainingdata: + stirling_pdf_extraconfigs: + stirling_pdf_customfiles: + stirling_pdf_logs: + stirling_pdf_pipeline: diff --git a/blueprints/stirling/index.ts b/blueprints/stirling/index.ts new file mode 100644 index 000000000..a8df5e339 --- /dev/null +++ b/blueprints/stirling/index.ts @@ -0,0 +1,22 @@ +import { + type DomainSchema, + type Schema, + type Template, + generateRandomDomain, +} from "../utils"; + +export function generate(schema: Schema): Template { + const mainDomain = generateRandomDomain(schema); + + const domains: DomainSchema[] = [ + { + host: mainDomain, + port: 8080, + serviceName: "stirling-pdf", + }, + ]; + + return { + domains, + }; +} diff --git a/blueprints/supabase/docker-compose.yml b/blueprints/supabase/docker-compose.yml new file mode 100644 index 000000000..89339736f --- /dev/null +++ b/blueprints/supabase/docker-compose.yml @@ -0,0 +1,448 @@ +# Usage +# Start: docker compose up +# With helpers: docker compose -f docker-compose.yml -f ../files/dev/docker-compose.dev.yml up +# Stop: docker compose down +# Destroy: docker compose -f docker-compose.yml -f ../files/dev/docker-compose.dev.yml down -v --remove-orphans + +name: supabase +version: "3.8" + +services: + studio: + container_name: supabase-studio + image: supabase/studio:20240729-ce42139 + + restart: unless-stopped + healthcheck: + test: + [ + "CMD", + "node", + "-e", + 
"require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})", + ] + timeout: 5s + interval: 5s + retries: 3 + depends_on: + analytics: + condition: service_healthy + environment: + STUDIO_PG_META_URL: http://meta:8080 + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION} + DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT} + + SUPABASE_URL: http://kong:8000 + SUPABASE_PUBLIC_URL: http://${SUPABASE_HOST} + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + AUTH_JWT_SECRET: ${JWT_SECRET} + + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_URL: http://analytics:4000 + NEXT_PUBLIC_ENABLE_LOGS: true + # Comment to use Big Query backend for analytics + NEXT_ANALYTICS_BACKEND_PROVIDER: postgres + # Uncomment to use Big Query backend for analytics + # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery + + kong: + container_name: supabase-kong + image: kong:2.8.1 + restart: unless-stopped + + # https://unix.stackexchange.com/a/294837 + entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start' + #ports: + # - ${KONG_HTTP_PORT}:8000/tcp + # - ${KONG_HTTPS_PORT}:8443/tcp + expose: + - 8000 + - 8443 + depends_on: + analytics: + condition: service_healthy + environment: + KONG_DATABASE: "off" + KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml + # https://github.com/supabase/cli/issues/14 + KONG_DNS_ORDER: LAST,A,CNAME + KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth + KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k + KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY} + DASHBOARD_USERNAME: ${DASHBOARD_USERNAME} + DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD} + volumes: + # https://github.com/supabase/supabase/issues/12661 + - ../files/volumes/api/kong.yml:/home/kong/temp.yml:ro + + auth: + container_name: supabase-auth + image: supabase/gotrue:v2.158.1 + + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:9999/health", + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + GOTRUE_API_HOST: 0.0.0.0 + GOTRUE_API_PORT: 9999 + API_EXTERNAL_URL: http://${SUPABASE_HOST} + + GOTRUE_DB_DRIVER: postgres + GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB} + + GOTRUE_SITE_URL: http://${SUPABASE_HOST} + GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS} + GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP} + + GOTRUE_JWT_ADMIN_ROLES: service_role + GOTRUE_JWT_AUD: authenticated + GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated + GOTRUE_JWT_EXP: ${JWT_EXPIRY} + GOTRUE_JWT_SECRET: ${JWT_SECRET} + + GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP} + GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS} + GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM} + # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true + # GOTRUE_SMTP_MAX_FREQUENCY: 1s + GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL} + GOTRUE_SMTP_HOST: ${SMTP_HOSTNAME} + GOTRUE_SMTP_PORT: ${SMTP_PORT} + GOTRUE_SMTP_USER: ${SMTP_USER} + GOTRUE_SMTP_PASS: ${SMTP_PASS} + GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME} + GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE} + 
GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION} + GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY} + GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE} + + GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP} + GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM} + # Uncomment to enable custom access token hook. You'll need to create a public.custom_access_token_hook function and grant necessary permissions. + # See: https://supabase.com/docs/guides/auth/auth-hooks#hook-custom-access-token for details + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED="true" + # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="pg-functions://postgres/public/custom_access_token_hook" + + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED="true" + # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/mfa_verification_attempt" + + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED="true" + # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/password_verification_attempt" + + rest: + container_name: supabase-rest + image: postgrest/postgrest:v12.2.0 + + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + restart: unless-stopped + environment: + PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB} + PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS} + PGRST_DB_ANON_ROLE: anon + PGRST_JWT_SECRET: ${JWT_SECRET} + PGRST_DB_USE_LEGACY_GUCS: "false" + PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET} + PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY} + command: "postgrest" + + realtime: + # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain + container_name: realtime-dev.supabase-realtime + image: supabase/realtime:v2.30.23 + + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + healthcheck: + test: + [ + "CMD", + "curl", + "-sSfL", + "--head", + "-o", + "/dev/null", + "-H", + "Authorization: Bearer ${ANON_KEY}", + "http://localhost:4000/api/tenants/realtime-dev/health", + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + PORT: 4000 + DB_HOST: ${POSTGRES_HOSTNAME} + DB_PORT: ${POSTGRES_PORT} + DB_USER: supabase_admin + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_NAME: ${POSTGRES_DB} + DB_AFTER_CONNECT_QUERY: "SET search_path TO _realtime" + DB_ENC_KEY: supabaserealtime + API_JWT_SECRET: ${JWT_SECRET} + SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq + ERL_AFLAGS: -proto_dist inet_tcp + DNS_NODES: "''" + RLIMIT_NOFILE: "10000" + APP_NAME: realtime + SEED_SELF_HOST: true + + # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up + storage: + container_name: supabase-storage + image: supabase/storage-api:v1.0.6 + + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + rest: + condition: service_started + imgproxy: + condition: service_started + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:5000/status", + ] + timeout: 5s + interval: 5s + retries: 3 + restart: unless-stopped + environment: + ANON_KEY: ${ANON_KEY} + SERVICE_KEY: ${SERVICE_ROLE_KEY} + POSTGREST_URL: http://rest:3000 + PGRST_JWT_SECRET: ${JWT_SECRET} + 
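+      # storage connects with its own Postgres role (supabase_storage_admin)
+      # rather than the postgres superuser; the roles.sql init script shipped
+      # with this blueprint aligns that role's password with POSTGRES_PASSWORD
+      # during database initialization, which is what makes the DATABASE_URL
+      # on the next line work.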
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB} + FILE_SIZE_LIMIT: 52428800 + STORAGE_BACKEND: file + FILE_STORAGE_BACKEND_PATH: /var/lib/storage + TENANT_ID: stub + # TODO: https://github.com/supabase/storage-api/issues/55 + REGION: stub + GLOBAL_S3_BUCKET: stub + ENABLE_IMAGE_TRANSFORMATION: "true" + IMGPROXY_URL: http://imgproxy:5001 + volumes: + - ../files/volumes/storage:/var/lib/storage:z + + imgproxy: + container_name: supabase-imgproxy + image: darthsim/imgproxy:v3.8.0 + + healthcheck: + test: ["CMD", "imgproxy", "health"] + timeout: 5s + interval: 5s + retries: 3 + environment: + IMGPROXY_BIND: ":5001" + IMGPROXY_LOCAL_FILESYSTEM_ROOT: / + IMGPROXY_USE_ETAG: "true" + IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION} + volumes: + - ../files/volumes/storage:/var/lib/storage:z + + meta: + container_name: supabase-meta + image: supabase/postgres-meta:v0.83.2 + + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + analytics: + condition: service_healthy + restart: unless-stopped + environment: + PG_META_PORT: 8080 + PG_META_DB_HOST: ${POSTGRES_HOSTNAME} + PG_META_DB_PORT: ${POSTGRES_PORT} + PG_META_DB_NAME: ${POSTGRES_DB} + PG_META_DB_USER: supabase_admin + PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD} + + functions: + container_name: supabase-edge-functions + image: supabase/edge-runtime:v1.56.0 + restart: unless-stopped + + depends_on: + analytics: + condition: service_healthy + environment: + JWT_SECRET: ${JWT_SECRET} + SUPABASE_URL: http://kong:8000 + SUPABASE_ANON_KEY: ${ANON_KEY} + SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY} + SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB} + # TODO: Allow configuring VERIFY_JWT per function. 
This PR might help: https://github.com/supabase/cli/pull/786 + VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}" + volumes: + - ../files/volumes/functions:/home/deno/functions:Z + command: + - start + - --main-service + - /home/deno/functions/main + + analytics: + container_name: supabase-analytics + image: supabase/logflare:1.4.0 + + healthcheck: + test: ["CMD", "curl", "http://localhost:4000/health"] + timeout: 5s + interval: 5s + retries: 10 + restart: unless-stopped + depends_on: + db: + # Disable this if you are using an external Postgres database + condition: service_healthy + # Uncomment to use Big Query backend for analytics + # volumes: + # - type: bind + # source: ${PWD}/gcloud.json + # target: /opt/app/rel/logflare/bin/gcloud.json + # read_only: true + environment: + LOGFLARE_NODE_HOST: 127.0.0.1 + DB_USERNAME: supabase_admin + DB_DATABASE: ${POSTGRES_DB} + DB_HOSTNAME: ${POSTGRES_HOSTNAME} + DB_PORT: ${POSTGRES_PORT} + DB_PASSWORD: ${POSTGRES_PASSWORD} + DB_SCHEMA: _analytics + LOGFLARE_API_KEY: ${LOGFLARE_API_KEY} + LOGFLARE_SINGLE_TENANT: true + LOGFLARE_SUPABASE_MODE: true + LOGFLARE_MIN_CLUSTER_SIZE: 1 + + # Comment variables to use Big Query backend for analytics + POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB} + POSTGRES_BACKEND_SCHEMA: _analytics + LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true + # Uncomment to use Big Query backend for analytics + # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID} + # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER} + #ports: + # - 4000:4000 + expose: + - 4000 + + # Comment out everything below this point if you are using an external Postgres database + db: + container_name: supabase-db + image: supabase/postgres:15.1.1.78 + + healthcheck: + test: pg_isready -U postgres -h localhost + interval: 5s + timeout: 5s + retries: 10 + depends_on: + vector: + condition: service_healthy + command: + - postgres + - -c + - config_file=/etc/postgresql/postgresql.conf + - -c + - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs + restart: unless-stopped + #ports: + # # Pass down internal port because it's set dynamically by other services + # - ${POSTGRES_PORT}:${POSTGRES_PORT} + expose: + - ${POSTGRES_PORT} + environment: + POSTGRES_HOST: /var/run/postgresql + PGPORT: ${POSTGRES_PORT} + POSTGRES_PORT: ${POSTGRES_PORT} + PGPASSWORD: ${POSTGRES_PASSWORD} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + PGDATABASE: ${POSTGRES_DB} + POSTGRES_DB: ${POSTGRES_DB} + JWT_SECRET: ${JWT_SECRET} + JWT_EXP: ${JWT_EXPIRY} + volumes: + - ../files/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z + # Must be superuser to create event trigger + - ../files/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z + # Must be superuser to alter reserved role + - ../files/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z + # Initialize the database settings with JWT_SECRET and JWT_EXP + - ../files/volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z + # PGDATA directory is persisted between restarts + - ../files/volumes/db/data:/var/lib/postgresql/data:Z + # Changes required for Analytics support + - ../files/volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z + # Use named volume to persist pgsodium decryption key between restarts + - db-config:/etc/postgresql-custom + + vector: + container_name: supabase-vector + image: timberio/vector:0.28.1-alpine + + 
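+    # vector tails container logs from the Docker socket mounted below and
+    # ships them to the analytics (Logflare) service, authenticated by
+    # LOGFLARE_API_KEY, using the vector.yml pipeline mounted under /etc/vector.
+    # The db service waits for this container to be healthy, so a broken
+    # vector config will keep the whole stack from starting.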
healthcheck:
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://vector:9001/health",
+        ]
+      timeout: 5s
+      interval: 5s
+      retries: 3
+    volumes:
+      - ../files/volumes/logs/vector.yml:/etc/vector/vector.yml:ro
+      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
+    environment:
+      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+    command: ["--config", "etc/vector/vector.yml"]
+
+volumes:
+  db-config:
+
+networks:
+  dokploy-network:
+    external: true
diff --git a/blueprints/supabase/index.ts b/blueprints/supabase/index.ts
new file mode 100644
index 000000000..6922c77fb
--- /dev/null
+++ b/blueprints/supabase/index.ts
@@ -0,0 +1,995 @@
+import { createHmac } from "node:crypto";
+import {
+	type DomainSchema,
+	type Schema,
+	type Template,
+	generateBase64,
+	generatePassword,
+	generateRandomDomain,
+} from "../utils";
+
+interface JWTPayload {
+	role: "anon" | "service_role";
+	iss: string;
+	iat: number;
+	exp: number;
+}
+
+function base64UrlEncode(str: string): string {
+	return Buffer.from(str)
+		.toString("base64")
+		.replace(/\+/g, "-")
+		.replace(/\//g, "_")
+		.replace(/=/g, "");
+}
+
+function generateJWT(payload: JWTPayload, secret: string): string {
+	const header = { alg: "HS256", typ: "JWT" };
+
+	const encodedHeader = base64UrlEncode(JSON.stringify(header));
+	const encodedPayload = base64UrlEncode(JSON.stringify(payload));
+
+	const signature = createHmac("sha256", secret)
+		.update(`${encodedHeader}.${encodedPayload}`)
+		.digest("base64url");
+
+	return `${encodedHeader}.${encodedPayload}.${signature}`;
+}
+
+export function generateSupabaseAnonJWT(secret: string): string {
+	const now = Math.floor(Date.now() / 1000);
+	const payload: JWTPayload = {
+		role: "anon",
+		iss: "supabase",
+		iat: now,
+		exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
+	};
+
+	return generateJWT(payload, secret);
+}
+
+export function generateSupabaseServiceJWT(secret: string): string {
+	const now = Math.floor(Date.now() / 1000);
+	const payload: JWTPayload = {
+		role: "service_role",
+		iss: "supabase",
+		iat: now,
+		exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
+	};
+
+	return generateJWT(payload, secret);
+}
+
+export function generate(schema: Schema): Template {
+	const mainDomain = generateRandomDomain(schema);
+
+	const postgresPassword = generatePassword(32);
+	const jwtSecret = generateBase64(32);
+	const dashboardPassword = generatePassword(32);
+	const logflareApiKey = generatePassword(32);
+
+	const anonKey = generateSupabaseAnonJWT(jwtSecret);
+	const serviceRoleKey = generateSupabaseServiceJWT(jwtSecret);
+	const domains: DomainSchema[] = [
+		{
+			serviceName: "kong",
+			host: mainDomain,
+			port: 8000,
+		},
+	];
+
+	const envs = [
+		`SUPABASE_HOST=${mainDomain}`,
+		`POSTGRES_PASSWORD=${postgresPassword}`,
+		`JWT_SECRET=${jwtSecret}`,
+		`ANON_KEY=${anonKey}`,
+		`SERVICE_ROLE_KEY=${serviceRoleKey}`,
+		"DASHBOARD_USERNAME=supabase",
+		`DASHBOARD_PASSWORD=${dashboardPassword}`,
+		"POSTGRES_HOSTNAME=db",
+		"POSTGRES_DB=postgres",
+		"POSTGRES_PORT=5432",
+		"KONG_HTTP_PORT=8000",
+		"KONG_HTTPS_PORT=8443",
+		"PGRST_DB_SCHEMAS=public,storage,graphql_public",
+		"ADDITIONAL_REDIRECT_URLS=",
+		"JWT_EXPIRY=3600",
+		"DISABLE_SIGNUP=false",
+		`MAILER_URLPATHS_CONFIRMATION=\"/auth/v1/verify\"`,
+		`MAILER_URLPATHS_INVITE=\"/auth/v1/verify\"`,
+		`MAILER_URLPATHS_RECOVERY=\"/auth/v1/verify\"`,
+		`MAILER_URLPATHS_EMAIL_CHANGE=\"/auth/v1/verify\"`,
+		"ENABLE_EMAIL_SIGNUP=true",
+		"ENABLE_EMAIL_AUTOCONFIRM=false",
+		"SMTP_ADMIN_EMAIL=admin@example.com",
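+		// The SMTP_* values here are placeholders (no supabase-mail service is
+		// defined in this compose file); point them at a real SMTP provider
+		// before relying on signup confirmation or recovery emails.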
"SMTP_HOSTNAME=supabase-mail", + "SMTP_PORT=2500", + "SMTP_USER=fake_mail_user", + "SMTP_PASS=fake_mail_password", + "SMTP_SENDER_NAME=fake_sender", + "ENABLE_ANONYMOUS_USERS=false", + "ENABLE_PHONE_SIGNUP=true", + "ENABLE_PHONE_AUTOCONFIRM=true", + "STUDIO_DEFAULT_ORGANIZATION=Default Organization", + "STUDIO_DEFAULT_PROJECT=Default Project", + "STUDIO_PORT=3000", + "IMGPROXY_ENABLE_WEBP_DETECTION=true", + "FUNCTIONS_VERIFY_JWT=false", + `LOGFLARE_LOGGER_BACKEND_API_KEY=${logflareApiKey}`, + `LOGFLARE_API_KEY=${logflareApiKey}`, + "DOCKER_SOCKET_LOCATION=/var/run/docker.sock", + "GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID", + "GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER", + ]; + + const mounts: Template["mounts"] = [ + { + filePath: "/volumes/api/kong.yml", + content: ` +_format_version: '2.1' +_transform: true + +### +### Consumers / Users +### +consumers: + - username: DASHBOARD + - username: anon + keyauth_credentials: + - key: $SUPABASE_ANON_KEY + - username: service_role + keyauth_credentials: + - key: $SUPABASE_SERVICE_KEY + +### +### Access Control List +### +acls: + - consumer: anon + group: anon + - consumer: service_role + group: admin + +### +### Dashboard credentials +### +basicauth_credentials: + - consumer: DASHBOARD + username: $DASHBOARD_USERNAME + password: $DASHBOARD_PASSWORD + +### +### API Routes +### +services: + ## Open Auth routes + - name: auth-v1-open + url: http://auth:9999/verify + routes: + - name: auth-v1-open + strip_path: true + paths: + - /auth/v1/verify + plugins: + - name: cors + - name: auth-v1-open-callback + url: http://auth:9999/callback + routes: + - name: auth-v1-open-callback + strip_path: true + paths: + - /auth/v1/callback + plugins: + - name: cors + - name: auth-v1-open-authorize + url: http://auth:9999/authorize + routes: + - name: auth-v1-open-authorize + strip_path: true + paths: + - /auth/v1/authorize + plugins: + - name: cors + + ## Secure Auth routes + - name: auth-v1 + _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*' + url: http://auth:9999/ + routes: + - name: auth-v1-all + strip_path: true + paths: + - /auth/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure REST routes + - name: rest-v1 + _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*' + url: http://rest:3000/ + routes: + - name: rest-v1-all + strip_path: true + paths: + - /rest/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure GraphQL routes + - name: graphql-v1 + _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql' + url: http://rest:3000/rpc/graphql + routes: + - name: graphql-v1-all + strip_path: true + paths: + - /graphql/v1 + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: true + - name: request-transformer + config: + add: + headers: + - Content-Profile:graphql_public + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + + ## Secure Realtime routes + - name: realtime-v1-ws + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/socket + protocol: ws + routes: + - name: realtime-v1-ws + strip_path: true + paths: + - /realtime/v1/ + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + - name: 
realtime-v1-rest + _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*' + url: http://realtime-dev.supabase-realtime:4000/api + protocol: http + routes: + - name: realtime-v1-rest + strip_path: true + paths: + - /realtime/v1/api + plugins: + - name: cors + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + - anon + ## Storage routes: the storage server manages its own auth + - name: storage-v1 + _comment: 'Storage: /storage/v1/* -> http://storage:5000/*' + url: http://storage:5000/ + routes: + - name: storage-v1-all + strip_path: true + paths: + - /storage/v1/ + plugins: + - name: cors + + ## Edge Functions routes + - name: functions-v1 + _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*' + url: http://functions:9000/ + routes: + - name: functions-v1-all + strip_path: true + paths: + - /functions/v1/ + plugins: + - name: cors + + ## Analytics routes + - name: analytics-v1 + _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*' + url: http://analytics:4000/ + routes: + - name: analytics-v1-all + strip_path: true + paths: + - /analytics/v1/ + + ## Secure Database routes + - name: meta + _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*' + url: http://meta:8080/ + routes: + - name: meta-all + strip_path: true + paths: + - /pg/ + plugins: + - name: key-auth + config: + hide_credentials: false + - name: acl + config: + hide_groups_header: true + allow: + - admin + + ## Protected Dashboard - catch all remaining routes + - name: dashboard + _comment: 'Studio: /* -> http://studio:3000/*' + url: http://studio:3000/ + routes: + - name: dashboard-all + strip_path: true + paths: + - / + plugins: + - name: cors + - name: basic-auth + config: + hide_credentials: true + `, + }, + { + filePath: "/volumes/db/init/data.sql", + content: ` + `, + }, + { + filePath: "/volumes/db/jwt.sql", + content: ` +\\set jwt_secret \`echo "$JWT_SECRET"\` +\\set jwt_exp \`echo "$JWT_EXP"\` + +ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret'; +ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp'; + `, + }, + { + filePath: "/volumes/db/logs.sql", + content: ` +\\set pguser \`echo "$POSTGRES_USER"\` + +create schema if not exists _analytics; +alter schema _analytics owner to :pguser; + `, + }, + { + filePath: "/volumes/db/realtime.sql", + content: ` +\\set pguser \`echo "$POSTGRES_USER"\` + +create schema if not exists _realtime; +alter schema _realtime owner to :pguser; + `, + }, + { + filePath: "/volumes/db/roles.sql", + content: ` +-- NOTE: change to your own passwords for production environments +\\set pgpass \`echo "$POSTGRES_PASSWORD"\` + +ALTER USER authenticator WITH PASSWORD :'pgpass'; +ALTER USER pgbouncer WITH PASSWORD :'pgpass'; +ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass'; +ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass'; + `, + }, + { + filePath: "/volumes/db/webhooks.sql", + content: ` +BEGIN; + -- Create pg_net extension + CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions; + -- Create supabase_functions schema + CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin; + GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO 
postgres, anon, authenticated, service_role; + ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role; + -- supabase_functions.migrations definition + CREATE TABLE supabase_functions.migrations ( + version text PRIMARY KEY, + inserted_at timestamptz NOT NULL DEFAULT NOW() + ); + -- Initial supabase_functions migration + INSERT INTO supabase_functions.migrations (version) VALUES ('initial'); + -- supabase_functions.hooks definition + CREATE TABLE supabase_functions.hooks ( + id bigserial PRIMARY KEY, + hook_table_id integer NOT NULL, + hook_name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + request_id bigint + ); + CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id); + CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name); + COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.'; + CREATE FUNCTION supabase_functions.http_request() + RETURNS trigger + LANGUAGE plpgsql + AS $function$ + DECLARE + request_id bigint; + payload jsonb; + url text := TG_ARGV[0]::text; + method text := TG_ARGV[1]::text; + headers jsonb DEFAULT '{}'::jsonb; + params jsonb DEFAULT '{}'::jsonb; + timeout_ms integer DEFAULT 1000; + BEGIN + IF url IS NULL OR url = 'null' THEN + RAISE EXCEPTION 'url argument is missing'; + END IF; + + IF method IS NULL OR method = 'null' THEN + RAISE EXCEPTION 'method argument is missing'; + END IF; + + IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN + headers = '{"Content-Type": "application/json"}'::jsonb; + ELSE + headers = TG_ARGV[2]::jsonb; + END IF; + + IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN + params = '{}'::jsonb; + ELSE + params = TG_ARGV[3]::jsonb; + END IF; + + IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN + timeout_ms = 1000; + ELSE + timeout_ms = TG_ARGV[4]::integer; + END IF; + + CASE + WHEN method = 'GET' THEN + SELECT http_get INTO request_id FROM net.http_get( + url, + params, + headers, + timeout_ms + ); + WHEN method = 'POST' THEN + payload = jsonb_build_object( + 'old_record', OLD, + 'record', NEW, + 'type', TG_OP, + 'table', TG_TABLE_NAME, + 'schema', TG_TABLE_SCHEMA + ); + + SELECT http_post INTO request_id FROM net.http_post( + url, + payload, + params, + headers, + timeout_ms + ); + ELSE + RAISE EXCEPTION 'method argument % is invalid', method; + END CASE; + + INSERT INTO supabase_functions.hooks + (hook_table_id, hook_name, request_id) + VALUES + (TG_RELID, TG_NAME, request_id); + + RETURN NEW; + END + $function$; + -- Supabase super admin + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_functions_admin' + ) + THEN + CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION; + END IF; + END + $$; + GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin; + ALTER USER supabase_functions_admin SET search_path = "supabase_functions"; + ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin; + ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin; + ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin; + GRANT supabase_functions_admin TO 
postgres; + -- Remove unused supabase_pg_net_admin role + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_roles + WHERE rolname = 'supabase_pg_net_admin' + ) + THEN + REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin; + DROP OWNED BY supabase_pg_net_admin; + DROP ROLE supabase_pg_net_admin; + END IF; + END + $$; + -- pg_net grants when extension is already enabled + DO + $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_extension + WHERE extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END + $$; + -- Event trigger for pg_net + CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access() + RETURNS event_trigger + LANGUAGE plpgsql + AS $$ + BEGIN + IF EXISTS ( + SELECT 1 + FROM pg_event_trigger_ddl_commands() AS ev + JOIN pg_extension AS ext + ON ev.objid = ext.oid + WHERE ext.extname = 'pg_net' + ) + THEN + GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER; + ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net; + REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC; + GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role; + END IF; + END; + $$; + COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net'; + DO + $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_event_trigger + WHERE evtname = 'issue_pg_net_access' + ) THEN + CREATE EVENT TRIGGER 
issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
+        EXECUTE PROCEDURE extensions.grant_pg_net_access();
+      END IF;
+    END
+    $$;
+  INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
+  ALTER function supabase_functions.http_request() SECURITY DEFINER;
+  ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
+  REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
+  GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
+COMMIT;
+	`,
+	},
+	{
+		filePath: "/volumes/functions/hello/index.ts",
+		content: `
+// Follow this setup guide to integrate the Deno language server with your editor:
+// https://deno.land/manual/getting_started/setup_your_environment
+// This enables autocomplete, go to definition, etc.
+
+import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
+
+serve(async () => {
+  return new Response(
+    \`"Hello from Edge Functions!"\`,
+    { headers: { "Content-Type": "application/json" } },
+  )
+})
+
+// To invoke:
+// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \\
+//   --header 'Authorization: Bearer <ANON_KEY>'
+	`,
+	},
+	{
+		filePath: "/volumes/functions/main/index.ts",
+		content: `
+import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
+import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
+
+console.log('main function started')
+
+const JWT_SECRET = Deno.env.get('JWT_SECRET')
+const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
+
+function getAuthToken(req: Request) {
+  const authHeader = req.headers.get('authorization')
+  if (!authHeader) {
+    throw new Error('Missing authorization header')
+  }
+  const [bearer, token] = authHeader.split(' ')
+  if (bearer !== 'Bearer') {
+    throw new Error(\`Auth header is not 'Bearer {token}'\`)
+  }
+  return token
+}
+
+async function verifyJWT(jwt: string): Promise<boolean> {
+  const encoder = new TextEncoder()
+  const secretKey = encoder.encode(JWT_SECRET)
+  try {
+    await jose.jwtVerify(jwt, secretKey)
+  } catch (err) {
+    console.error(err)
+    return false
+  }
+  return true
+}
+
+serve(async (req: Request) => {
+  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
+    try {
+      const token = getAuthToken(req)
+      const isValidJWT = await verifyJWT(token)
+
+      if (!isValidJWT) {
+        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
+          status: 401,
+          headers: { 'Content-Type': 'application/json' },
+        })
+      }
+    } catch (e) {
+      console.error(e)
+      return new Response(JSON.stringify({ msg: e.toString() }), {
+        status: 401,
+        headers: { 'Content-Type': 'application/json' },
+      })
+    }
+  }
+
+  const url = new URL(req.url)
+  const { pathname } = url
+  const path_parts = pathname.split('/')
+  const service_name = path_parts[1]
+
+  if (!service_name || service_name === '') {
+    const error = { msg: 'missing function name in request' }
+    return new Response(JSON.stringify(error), {
+      status: 400,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+
+  const servicePath = \`/home/deno/functions/\${service_name}\`
+  console.error(\`serving the request with \${servicePath}\`)
+
+  const memoryLimitMb = 150
+  const workerTimeoutMs = 1 * 60 * 1000
+  const noModuleCache = false
+  const importMapPath = null
+  const envVarsObj = Deno.env.toObject()
+  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
+
+  try {
+    const worker = await EdgeRuntime.userWorkers.create({
+      servicePath,
+      memoryLimitMb,
+      workerTimeoutMs,
+      noModuleCache,
+      importMapPath,
+      envVars,
+    })
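+
+    // The request is handed off to the per-function user worker created
+    // above; its response is proxied back to the caller unchanged.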
+ return await worker.fetch(req) + } catch (e) { + const error = { msg: e.toString() } + return new Response(JSON.stringify(error), { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }) + } +}) + `, + }, + { + filePath: "/volumes/logs/vector.yml", + content: ` +api: + enabled: true + address: 0.0.0.0:9001 + +sources: + docker_host: + type: docker_logs + exclude_containers: + - supabase-vector + +transforms: + project_logs: + type: remap + inputs: + - docker_host + source: |- + .project = "default" + .event_message = del(.message) + .appname = del(.container_name) + del(.container_created_at) + del(.container_id) + del(.source_type) + del(.stream) + del(.label) + del(.image) + del(.host) + del(.stream) + router: + type: route + inputs: + - project_logs + route: + kong: '.appname == "supabase-kong"' + auth: '.appname == "supabase-auth"' + rest: '.appname == "supabase-rest"' + realtime: '.appname == "supabase-realtime"' + storage: '.appname == "supabase-storage"' + functions: '.appname == "supabase-functions"' + db: '.appname == "supabase-db"' + # Ignores non nginx errors since they are related with kong booting up + kong_logs: + type: remap + inputs: + - router.kong + source: |- + req, err = parse_nginx_log(.event_message, "combined") + if err == null { + .timestamp = req.timestamp + .metadata.request.headers.referer = req.referer + .metadata.request.headers.user_agent = req.agent + .metadata.request.headers.cf_connecting_ip = req.client + .metadata.request.method = req.method + .metadata.request.path = req.path + .metadata.request.protocol = req.protocol + .metadata.response.status_code = req.status + } + if err != null { + abort + } + # Ignores non nginx errors since they are related with kong booting up + kong_err: + type: remap + inputs: + - router.kong + source: |- + .metadata.request.method = "GET" + .metadata.response.status_code = 200 + parsed, err = parse_nginx_log(.event_message, "error") + if err == null { + .timestamp = parsed.timestamp + .severity = parsed.severity + .metadata.request.host = parsed.host + .metadata.request.headers.cf_connecting_ip = parsed.client + url, err = split(parsed.request, " ") + if err == null { + .metadata.request.method = url[0] + .metadata.request.path = url[1] + .metadata.request.protocol = url[2] + } + } + if err != null { + abort + } + # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency. + auth_logs: + type: remap + inputs: + - router.auth + source: |- + parsed, err = parse_json(.event_message) + if err == null { + .metadata.timestamp = parsed.time + .metadata = merge!(.metadata, parsed) + } + # PostgREST logs are structured so we separate timestamp from message using regex + rest_logs: + type: remap + inputs: + - router.rest + source: |- + parsed, err = parse_regex(.event_message, r'^(?P