From 2d6ce5c35c42676b7a6b13492c232e9edc4e486f Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 13:35:01 -0500 Subject: [PATCH 01/19] Cleanup for GitHub CI and Dependabot. --- .github/dependabot.yml | 15 --------------- .github/workflows/ci.yml | 14 ++++++++------ .pre-commit-config.yaml | 4 ++-- deploy.sh | 4 +++- docs/DEPLOYMENT.md | 7 +------ docs/DEVELOPMENT.md | 13 +++++++++---- docs/INFRA_CONTRACT.md | 7 +------ infra/aws/db_setup/requirements.txt | 8 ++++---- 8 files changed, 28 insertions(+), 44 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e644f37..ca66faf 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,20 +1,5 @@ version: 2 updates: - - package-ecosystem: "pip" - directory: "/syncbot" - schedule: - interval: "weekly" - open-pull-requests-limit: 10 - groups: - minor-and-patch: - update-types: ["minor", "patch"] - - - package-ecosystem: "pip" - directory: "/infra/aws/db_setup" - schedule: - interval: "weekly" - open-pull-requests-limit: 10 - - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9824f5a..cf7ac38 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -34,18 +34,20 @@ jobs: PR_HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }} run: | poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt - if git diff --quiet syncbot/requirements.txt; then - echo "requirements.txt is already in sync." + echo "# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (sam build)." > infra/aws/db_setup/requirements.txt + grep -E "^(pymysql|psycopg2-binary|cryptography)==" syncbot/requirements.txt >> infra/aws/db_setup/requirements.txt + if git diff --quiet syncbot/requirements.txt infra/aws/db_setup/requirements.txt; then + echo "requirements.txt files are already in sync." 
elif [[ -n "${PR_HEAD_REPO}" && "${PR_HEAD_REPO}" != "${GITHUB_REPOSITORY}" ]]; then - echo "::error::syncbot/requirements.txt is out of sync with poetry.lock. From the repo root run: poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt" + echo "::error::Requirements files are out of sync with poetry.lock. Commit with pre-commit installed (sync-requirements hook) or follow docs/DEVELOPMENT.md." exit 1 else git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" - git add syncbot/requirements.txt - git commit -m "chore: sync requirements.txt with poetry.lock" + git add syncbot/requirements.txt infra/aws/db_setup/requirements.txt + git commit -m "chore: sync requirements.txt files with poetry.lock" git push - echo "::notice::requirements.txt was out of sync and has been auto-fixed." + echo "::notice::requirements.txt files were out of sync and have been auto-fixed." fi sam-lint: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3bf51ed..8e37bfd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,8 +27,8 @@ repos: - repo: local hooks: - id: sync-requirements - name: Sync requirements.txt with poetry.lock - entry: bash -c 'poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt && git add syncbot/requirements.txt' + name: Sync requirements.txt files with poetry.lock + entry: bash -c 'poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt && echo "# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (sam build)." 
> infra/aws/db_setup/requirements.txt && grep -E "^(pymysql|psycopg2-binary|cryptography)==" syncbot/requirements.txt >> infra/aws/db_setup/requirements.txt && git add syncbot/requirements.txt infra/aws/db_setup/requirements.txt' language: system files: ^poetry\.lock$ pass_filenames: false diff --git a/deploy.sh b/deploy.sh index 34c59ad..830105e 100755 --- a/deploy.sh +++ b/deploy.sh @@ -669,7 +669,9 @@ main() { poetry update --quiet if poetry self show plugins 2>/dev/null | grep -q poetry-plugin-export; then poetry export -f requirements.txt --without-hashes -o "$REPO_ROOT/syncbot/requirements.txt" - echo "syncbot/requirements.txt updated from poetry.lock." + echo "# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (sam build)." > "$REPO_ROOT/infra/aws/db_setup/requirements.txt" + grep -E "^(pymysql|psycopg2-binary|cryptography)==" "$REPO_ROOT/syncbot/requirements.txt" >> "$REPO_ROOT/infra/aws/db_setup/requirements.txt" + echo "syncbot/requirements.txt and infra/aws/db_setup/requirements.txt updated from poetry.lock." else echo "Warning: poetry-plugin-export not installed. Run: poetry self add poetry-plugin-export" >&2 echo "Skipping requirements.txt sync." >&2 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index cf02d2e..12d9422 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -181,12 +181,7 @@ Configure **per-environment** (`test` / `prod`) variables and secrets so they ma The interactive deploy script can set these via `gh` when you opt in. Re-run that step after changing DB mode or engine so CI stays aligned. -**Dependency hygiene:** The workflow runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`. 
After changing `pyproject.toml`: - -```bash -poetry lock -poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt -``` +**Dependency hygiene:** The AWS deploy workflow runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`. After changing `pyproject.toml`, run `poetry lock` and commit; the **pre-commit `sync-requirements` hook** (see [.pre-commit-config.yaml](../.pre-commit-config.yaml)) regenerates both requirements files when `poetry.lock` changes. If you do not use pre-commit, run the export commands documented in [DEVELOPMENT.md](DEVELOPMENT.md). ### 4. Ongoing local deploys (least privilege) diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md index b2affa4..1d45cd8 100644 --- a/docs/DEVELOPMENT.md +++ b/docs/DEVELOPMENT.md @@ -62,13 +62,18 @@ syncbot/ ## Dependency management -After `poetry add` / `poetry update`, regenerate the pinned file used by the Docker image and **`pip-audit`** in CI so it matches `poetry.lock`: +After `poetry add` / `poetry update`, keep `poetry.lock` and the pinned requirements files aligned: + +- **Recommended:** Install [pre-commit](https://pre-commit.com) (`pip install pre-commit && pre-commit install`). When you commit a change to `poetry.lock`, the **`sync-requirements`** hook runs `poetry export` and refreshes **`syncbot/requirements.txt`** and **`infra/aws/db_setup/requirements.txt`** (the DbSetup Lambda subset) automatically. + +- **Without pre-commit:** Run the export yourself (Poetry 2.x needs the export plugin once: `poetry self add poetry-plugin-export`): ```bash -poetry self add poetry-plugin-export # Poetry 2.x; once per Poetry install poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt +echo "# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (sam build)." 
> infra/aws/db_setup/requirements.txt +grep -E "^(pymysql|psycopg2-binary|cryptography)==" syncbot/requirements.txt >> infra/aws/db_setup/requirements.txt ``` -The root **`./deploy.sh`** may run `poetry update` and regenerate `syncbot/requirements.txt` when Poetry is on your `PATH` (see [DEPLOYMENT.md](DEPLOYMENT.md)). +The root **`./deploy.sh`** dependency-sync menu may run `poetry update` and regenerate both requirements files when Poetry is on your `PATH` (see [DEPLOYMENT.md](DEPLOYMENT.md)). -CI runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt` (see [.github/workflows/ci.yml](../.github/workflows/ci.yml)). +The AWS deploy workflow runs **`pip-audit`** on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt` (see [.github/workflows/deploy-aws.yml](../.github/workflows/deploy-aws.yml)). CI verifies both files match `poetry.lock` (see [.github/workflows/ci.yml](../.github/workflows/ci.yml)). diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 216dabe..f7e790f 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -18,12 +18,7 @@ The application reads configuration from environment variables. Providers must i - CI Python version - `pyproject.toml` Python constraint - `syncbot/requirements.txt` deployment pins -- When dependency constraints change in `pyproject.toml`, refresh both lock and deployment requirements: - -```bash -poetry lock -poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt -``` +- When dependency constraints change in `pyproject.toml`, refresh the lockfile and deployment requirements. The **pre-commit `sync-requirements` hook** regenerates **`syncbot/requirements.txt`** and **`infra/aws/db_setup/requirements.txt`** from `poetry.lock` when you commit lockfile changes. 
Manually: `poetry lock`, then `poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt` and rebuild `infra/aws/db_setup/requirements.txt` as in [.pre-commit-config.yaml](../.pre-commit-config.yaml). ### Database (backend-agnostic) diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt index 4e51763..9deb9fe 100644 --- a/infra/aws/db_setup/requirements.txt +++ b/infra/aws/db_setup/requirements.txt @@ -1,4 +1,4 @@ -pymysql==1.1.2 -psycopg2-binary==2.9.11 -# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (pip-audit / sam build). -cryptography==46.0.6 +# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (sam build). +cryptography==46.0.6 ; python_version >= "3.12" and python_version < "4.0" +psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" +pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" From 87a14cead5427b54746fd177a2f4c461ec0e45ac Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 14:45:25 -0500 Subject: [PATCH 02/19] Added more options for external DB hosts, like TiDB. --- .env.example | 2 + .github/workflows/deploy-aws.yml | 6 + CHANGELOG.md | 6 + docs/DEPLOYMENT.md | 11 +- docs/INFRA_CONTRACT.md | 4 +- infra/aws/db_setup/handler.py | 203 +++++++++++++++++++++---------- infra/aws/scripts/deploy.sh | 104 +++++++++++++++- infra/aws/template.yaml | 58 ++++++++- infra/gcp/main.tf | 8 ++ infra/gcp/scripts/deploy.sh | 38 +++++- infra/gcp/variables.tf | 12 ++ tests/test_db_setup.py | 78 ++++++++++++ 12 files changed, 454 insertions(+), 76 deletions(-) diff --git a/.env.example b/.env.example index 026f186..f26e197 100644 --- a/.env.example +++ b/.env.example @@ -13,6 +13,8 @@ # Option A — MySQL (default): legacy vars or DATABASE_URL DATABASE_BACKEND=mysql DATABASE_HOST=127.0.0.1 +# DATABASE_PORT defaults to 3306 (MySQL) or 5432 (PostgreSQL) when unset. +# Set explicitly for non-standard ports (e.g. 
TiDB Cloud often uses 4000). # DATABASE_PORT=3306 DATABASE_USER=root DATABASE_PASSWORD=rootpass diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index fab46d5..bcebfed 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -103,6 +103,9 @@ jobs: ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ + ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ + ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ + ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ @@ -165,6 +168,9 @@ jobs: ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ + ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ + ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ + ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ diff --git a/CHANGELOG.md b/CHANGELOG.md index 0068f3f..6229a9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [Unreleased] + +### Added + +- `ExistingDatabasePort`, `ExistingDatabaseCreateAppUser`, and `ExistingDatabaseCreateSchema` deploy parameters for external DB providers (e.g. TiDB Cloud) + ## [1.0.1] - 2026-03-26 ### Changed diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 12d9422..ced1b93 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -48,7 +48,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). 4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. 5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. -6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +6. 
**Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. 7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` @@ -146,7 +146,7 @@ sam deploy \ ... ``` -Use **`sam deploy --guided`** the first time if you prefer prompts. For **existing RDS**, set `ExistingDatabaseHost`, `ExistingDatabaseAdminUser`, `ExistingDatabaseAdminPassword`, and for **private** DBs also `ExistingDatabaseNetworkMode=private`, `ExistingDatabaseSubnetIdsCsv`, `ExistingDatabaseLambdaSecurityGroupId`. Omit `ExistingDatabaseHost` to create a **new** RDS in the stack. +Use **`sam deploy --guided`** the first time if you prefer prompts. For **existing RDS**, set `ExistingDatabaseHost`, `ExistingDatabaseAdminUser`, `ExistingDatabaseAdminPassword`, and for **private** DBs also `ExistingDatabaseNetworkMode=private`, `ExistingDatabaseSubnetIdsCsv`, `ExistingDatabaseLambdaSecurityGroupId`. 
Optional: `ExistingDatabasePort` (empty = engine default), `ExistingDatabaseCreateAppUser` / `ExistingDatabaseCreateSchema` (`true`/`false`). Omit `ExistingDatabaseHost` to create a **new** RDS in the stack. **samconfig:** Predefined profiles in `samconfig.toml` (`test-new-rds`, `test-existing-rds`, etc.) — adjust placeholders before use. @@ -175,6 +175,9 @@ Configure **per-environment** (`test` / `prod`) variables and secrets so they ma | Var | `EXISTING_DATABASE_NETWORK_MODE` | `public` or `private` | | Var | `EXISTING_DATABASE_SUBNET_IDS_CSV` | **Private** mode: comma-separated subnet IDs (no spaces) | | Var | `EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID` | **Private** mode: Lambda ENI security group | +| Var | `EXISTING_DATABASE_PORT` | Optional; non-standard TCP port (e.g. `4000`). Empty = engine default in SAM. | +| Var | `EXISTING_DATABASE_CREATE_APP_USER` | `true` / `false` (default `true`). Set `false` when the DB cannot create a dedicated app user. | +| Var | `EXISTING_DATABASE_CREATE_SCHEMA` | `true` / `false` (default `true`). Set `false` when the database/schema already exists. | | Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | | Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | | Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | @@ -227,9 +230,9 @@ Build and push an image to Artifact Registry, then `gcloud run deploy` or `terra ## Using an existing RDS host (AWS) -When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource creates the schema and `syncbot_user_` with a generated app password in Secrets Manager. +When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource can create the schema and optionally a dedicated app user (`syncbot_user_`) with a generated app password in Secrets Manager—or skip user/schema steps and copy the admin password into the app secret when **`ExistingDatabaseCreateAppUser=false`**. 
-- **Public:** Lambda is not in your VPC; RDS must be reachable on the Internet on port **3306** or **5432**. +- **Public:** Lambda is not in your VPC; the DB must be reachable on the Internet on the configured port (**`ExistingDatabasePort`**, or **3306** / **5432** by engine). - **Private:** Lambda uses `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`; DB security group must allow the Lambda SG; subnets need **NAT** egress for Slack API calls. See also [Sharing infrastructure across apps](#sharing-infrastructure-across-apps-aws) below. diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index f7e790f..5812402 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -27,7 +27,7 @@ The application reads configuration from environment variables. Providers must i | `DATABASE_BACKEND` | `mysql` (default), `postgresql`, or `sqlite`. | | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | -| `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. | +| `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. Set explicitly for external providers that use a non-standard port (e.g. TiDB Cloud **4000**). | | `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. 
| @@ -51,7 +51,7 @@ The application reads configuration from environment variables. Providers must i | `SLACK_USER_SCOPES` | Comma-separated OAuth **user** scopes. Must match `oauth_config.scopes.user` and `syncbot/slack_manifest_scopes.py` `USER_SCOPES`. If this env requests scopes that are not declared on the Slack app, install fails with `invalid_scope`. | | `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. | -**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`PrimaryWorkspace`** → **`PRIMARY_WORKSPACE`**; **`EnableDbReset`** → **`ENABLE_DB_RESET`** (boolean `true` when enabled); optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). **`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. 
GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`primary_workspace`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. +**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`PrimaryWorkspace`** → **`PRIMARY_WORKSPACE`**; **`EnableDbReset`** → **`ENABLE_DB_RESET`** (boolean `true` when enabled); optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). When using an **existing** DB host: optional **`ExistingDatabasePort`** → **`DATABASE_PORT`** (empty uses engine default); **`ExistingDatabaseCreateAppUser`** / **`ExistingDatabaseCreateSchema`** control the DB setup custom resource (dedicated app user and `CREATE DATABASE`), not direct Lambda env names—see [DEPLOYMENT.md](DEPLOYMENT.md). 
**`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`primary_workspace`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); when **`use_existing_database`** is true, **`existing_db_create_app_user`** / **`existing_db_create_schema`** are recorded as Cloud Run service labels for operator documentation. **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. ### Optional diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index f72a328..16f380c 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -1,14 +1,14 @@ """ Custom CloudFormation resource: create database and app user for SyncBot. -Supports MySQL (port 3306) and PostgreSQL (port 5432). It can use: -- explicit admin password (existing-host mode), or -- admin password fetched from an admin secret ARN (new-RDS mode). +Supports MySQL and PostgreSQL with configurable port, optional schema creation, +and optional dedicated app user (external DBs may disallow CREATE USER). 
""" import json import re import base64 +import ssl import time import socket @@ -78,6 +78,37 @@ def _safe_ident(name: str) -> str: return name +def _default_port(database_engine: str) -> int: + return 3306 if database_engine == "mysql" else 5432 + + +def _parse_port(props: dict, database_engine: str) -> int: + raw = (props.get("Port") or "").strip() + if not raw: + return _default_port(database_engine) + try: + p = int(raw) + except ValueError as exc: + raise ValueError(f"Invalid Port: {raw!r}") from exc + if p < 1 or p > 65535: + raise ValueError(f"Invalid Port out of range: {p}") + return p + + +def _parse_bool_prop(props: dict, key: str, default: bool = True) -> bool: + v = (props.get(key) or "").strip().lower() + if v == "false": + return False + if v == "true": + return True + return default + + +def put_secret_string(secret_arn: str, secret_string: str) -> None: + client = boto3.client("secretsmanager") + client.put_secret_value(SecretId=secret_arn, SecretString=secret_string) + + def _handler_impl(event, context): request_type = event.get("RequestType", "Create") props = event.get("ResourceProperties", {}) @@ -89,6 +120,9 @@ def _handler_impl(event, context): stage = (props.get("Stage") or "test").strip() secret_arn = (props.get("SecretArn") or "").strip() database_engine = (props.get("DatabaseEngine") or "mysql").strip().lower() + port = _parse_port(props, database_engine) + create_app_user = _parse_bool_prop(props, "CreateAppUser", default=True) + create_schema = _parse_bool_prop(props, "CreateSchema", default=True) if request_type == "Delete": # Must return the same PhysicalResourceId as Create; never use a placeholder. 
@@ -114,11 +148,13 @@ def _handler_impl(event, context): return app_username = f"syncbot_user_{stage}".replace("-", "_") - try: - app_password = get_secret_value(secret_arn) - except Exception as e: - send(event, context, "FAILED", reason=f"GetSecretValue failed: {e}") - return + app_password = "" + if create_app_user: + try: + app_password = get_secret_value(secret_arn) + except Exception as e: + send(event, context, "FAILED", reason=f"GetSecretValue failed: {e}") + return if not admin_password: try: # RDS-managed master-user secrets store JSON; extract the password field. @@ -127,33 +163,52 @@ def _handler_impl(event, context): send(event, context, "FAILED", reason=f"Get admin secret failed: {e}") return + result_username = app_username if create_app_user else admin_user + physical_resource_id = result_username + try: # Fail fast on obvious network connectivity issues before opening DB client sessions. - _assert_tcp_reachable(host, 3306 if database_engine == "mysql" else 5432) - if database_engine == "mysql": - setup_database_mysql( - host=host, - admin_user=admin_user, - admin_password=admin_password, - schema=schema, - app_username=app_username, - app_password=app_password, - ) - else: - setup_database_postgresql( - host=host, - admin_user=admin_user, - admin_password=admin_password, - schema=schema, - app_username=app_username, - app_password=app_password, - ) + _assert_tcp_reachable(host, port) + if create_schema or create_app_user: + if database_engine == "mysql": + setup_database_mysql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + port=port, + create_schema=create_schema, + create_app_user=create_app_user, + ) + else: + setup_database_postgresql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + port=port, + create_schema=create_schema, + 
create_app_user=create_app_user, + ) + if not create_app_user: + put_secret_string(secret_arn, admin_password) except Exception as e: send(event, context, "FAILED", reason=f"Database setup failed: {e}") return - send(event, context, "SUCCESS", {"Username": app_username}, reason="OK", physical_resource_id=app_username) - return {"Username": app_username} + send( + event, + context, + "SUCCESS", + {"Username": result_username}, + reason="OK", + physical_resource_id=physical_resource_id, + ) + return {"Username": result_username} def _assert_tcp_reachable(host: str, port: int) -> None: @@ -207,9 +262,13 @@ def setup_database_mysql( schema: str, app_username: str, app_password: str, + port: int, + create_schema: bool, + create_app_user: bool, ) -> None: safe_schema = _safe_ident(schema) - _safe_ident(app_username) + if create_app_user: + _safe_ident(app_username) conn = None last_exc = None for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): @@ -218,10 +277,11 @@ def setup_database_mysql( host=host, user=admin_user, password=admin_password, - port=3306, + port=port, charset="utf8mb4", cursorclass=DictCursor, connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + ssl=ssl.create_default_context(), ) break except Exception as exc: @@ -233,13 +293,15 @@ def setup_database_mysql( ) try: with conn.cursor() as cur: - cur.execute(f"CREATE DATABASE IF NOT EXISTS `{safe_schema}`") - cur.execute( - "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", - (app_username, app_password), - ) - cur.execute(f"GRANT ALL PRIVILEGES ON `{safe_schema}`.* TO %s@'%%'", (app_username,)) - cur.execute("FLUSH PRIVILEGES") + if create_schema: + cur.execute(f"CREATE DATABASE IF NOT EXISTS `{safe_schema}`") + if create_app_user: + cur.execute( + "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", + (app_username, app_password), + ) + cur.execute(f"GRANT ALL PRIVILEGES ON `{safe_schema}`.* TO %s@'%%'", (app_username,)) + cur.execute("FLUSH PRIVILEGES") conn.commit() finally: conn.close() @@ -253,16 +315,22 
@@ def setup_database_postgresql( schema: str, app_username: str, app_password: str, + port: int, + create_schema: bool, + create_app_user: bool, ) -> None: max_db_connect_attempts = POSTGRES_DB_CONNECT_ATTEMPTS db_connect_retry_seconds = POSTGRES_DB_CONNECT_RETRY_SECONDS _safe_ident(schema) - _safe_ident(app_username) + if create_app_user: + _safe_ident(app_username) + _safe_ident(admin_user) + conn = psycopg2.connect( host=host, user=admin_user, password=admin_password, - port=5432, + port=port, dbname="postgres", connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, sslmode="require", @@ -270,29 +338,42 @@ def setup_database_postgresql( conn.autocommit = True try: with conn.cursor() as cur: - cur.execute("SELECT 1 FROM pg_roles WHERE rolname = %s", (app_username,)) - if cur.fetchone() is None: - q = psql.SQL("CREATE ROLE {name} WITH LOGIN PASSWORD %s").format( - name=psql.Identifier(app_username), - ) - cur.execute(q, (app_password,)) - else: - q = psql.SQL("ALTER ROLE {name} WITH LOGIN PASSWORD %s").format( - name=psql.Identifier(app_username), - ) - cur.execute(q, (app_password,)) - - cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (schema,)) - if cur.fetchone() is None: - cur.execute( - psql.SQL("CREATE DATABASE {db} OWNER {owner}").format( - db=psql.Identifier(schema), - owner=psql.Identifier(app_username), + if create_app_user: + cur.execute("SELECT 1 FROM pg_roles WHERE rolname = %s", (app_username,)) + if cur.fetchone() is None: + q = psql.SQL("CREATE ROLE {name} WITH LOGIN PASSWORD %s").format( + name=psql.Identifier(app_username), ) - ) + cur.execute(q, (app_password,)) + else: + q = psql.SQL("ALTER ROLE {name} WITH LOGIN PASSWORD %s").format( + name=psql.Identifier(app_username), + ) + cur.execute(q, (app_password,)) + + if create_schema: + cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (schema,)) + if cur.fetchone() is None: + if create_app_user: + cur.execute( + psql.SQL("CREATE DATABASE {db} OWNER {owner}").format( + 
db=psql.Identifier(schema), + owner=psql.Identifier(app_username), + ) + ) + else: + cur.execute( + psql.SQL("CREATE DATABASE {db} OWNER {owner}").format( + db=psql.Identifier(schema), + owner=psql.Identifier(admin_user), + ) + ) finally: conn.close() + if not create_app_user: + return + # Ensure runtime role can connect and run migrations in the target DB. # After CREATE DATABASE, RDS can take a short time before accepting connections. last_exc = None @@ -302,7 +383,7 @@ def setup_database_postgresql( host=host, user=admin_user, password=admin_password, - port=5432, + port=port, dbname=schema, connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, sslmode="require", @@ -328,6 +409,6 @@ def setup_database_postgresql( last_exc = exc time.sleep(db_connect_retry_seconds) raise RuntimeError( - f"Failed connecting to newly created database '{schema}' after " + f"Failed connecting to database '{schema}' after " f"{max_db_connect_attempts} attempts: {last_exc}" ) diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 0380d76..1f3c6b6 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -331,6 +331,9 @@ configure_github_actions_aws() { # $12 Comma-separated subnet IDs for Lambda in private mode # $13 Lambda ENI security group id in private mode # $14 Database engine: mysql | postgresql + # $15 Existing DB port (empty = engine default in SAM) + # $16 ExistingDatabaseCreateAppUser: true | false + # $17 ExistingDatabaseCreateSchema: true | false local bootstrap_outputs="$1" local bootstrap_stack_name="$2" local aws_region="$3" @@ -347,6 +350,11 @@ configure_github_actions_aws() { local existing_db_lambda_sg_id="${13:-}" local database_engine="${14:-}" [[ -z "$database_engine" ]] && database_engine="mysql" + local existing_db_port="${15:-}" + local existing_db_create_app_user="${16:-true}" + local existing_db_create_schema="${17:-true}" + [[ -z "$existing_db_create_app_user" ]] && existing_db_create_app_user="true" + [[ -z 
"$existing_db_create_schema" ]] && existing_db_create_schema="true" local role bucket boot_region role="$(output_value "$bootstrap_outputs" "GitHubDeployRoleArn")" bucket="$(output_value "$bootstrap_outputs" "DeploymentBucketName")" @@ -403,6 +411,9 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" fi + gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "$existing_db_port" -R "$repo" + gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "$existing_db_create_app_user" -R "$repo" + gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "$existing_db_create_schema" -R "$repo" else # Clear existing-host vars for new-RDS mode to avoid stale CI config. gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" @@ -410,6 +421,9 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "public" -R "$repo" gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "true" -R "$repo" + gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "true" -R "$repo" fi echo "Environment variables updated for '$env_name'." 
fi @@ -849,11 +863,13 @@ validate_private_existing_db_connectivity() { local db_vpc="$5" local db_sgs_csv="$6" local db_host="$7" + local db_port_override="${8:-}" local db_port subnet_list subnet_vpcs first_vpc line subnet_id subnet_vpc db_sg_id lambda_sg_vpc db_sg_list route_target rt_id ingress_ok local -a no_nat_subnets db_port="3306" [[ "$engine" == "postgresql" ]] && db_port="5432" + [[ -n "$db_port_override" ]] && db_port="$db_port_override" IFS=',' read -r -a subnet_list <<< "$subnet_csv" if [[ "${#subnet_list[@]}" -lt 1 ]]; then @@ -1187,6 +1203,9 @@ PREV_EXISTING_DATABASE_ADMIN_USER="" PREV_EXISTING_DATABASE_NETWORK_MODE="" PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="" PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" +PREV_EXISTING_DATABASE_PORT="" +PREV_EXISTING_DATABASE_CREATE_APP_USER="" +PREV_EXISTING_DATABASE_CREATE_SCHEMA="" PREV_DATABASE_ENGINE="" PREV_DATABASE_SCHEMA="" PREV_LOG_LEVEL="" @@ -1215,6 +1234,9 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_EXISTING_DATABASE_NETWORK_MODE="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseNetworkMode")" PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseSubnetIdsCsv")" PREV_EXISTING_DATABASE_LAMBDA_SG_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseLambdaSecurityGroupId")" + PREV_EXISTING_DATABASE_PORT="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabasePort")" + PREV_EXISTING_DATABASE_CREATE_APP_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateAppUser")" + PREV_EXISTING_DATABASE_CREATE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateSchema")" PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" @@ -1361,6 +1383,9 @@ 
SLACK_CLIENT_ID="$(required_from_env_or_prompt "SLACK_CLIENT_ID" "SlackClientID" ENV_EXISTING_DATABASE_HOST="${EXISTING_DATABASE_HOST:-}" ENV_EXISTING_DATABASE_ADMIN_USER="${EXISTING_DATABASE_ADMIN_USER:-}" ENV_EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" +ENV_EXISTING_DATABASE_PORT="${EXISTING_DATABASE_PORT:-}" +ENV_EXISTING_DATABASE_CREATE_APP_USER="${EXISTING_DATABASE_CREATE_APP_USER:-}" +ENV_EXISTING_DATABASE_CREATE_SCHEMA="${EXISTING_DATABASE_CREATE_SCHEMA:-}" EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" EXISTING_DATABASE_HOST="" EXISTING_DATABASE_ADMIN_USER="" @@ -1368,6 +1393,10 @@ EXISTING_DATABASE_ADMIN_PASSWORD="" EXISTING_DATABASE_NETWORK_MODE="public" EXISTING_DATABASE_SUBNET_IDS_CSV="" EXISTING_DATABASE_LAMBDA_SG_ID="" +EXISTING_DATABASE_PORT="" +EXISTING_DATABASE_CREATE_APP_USER="true" +EXISTING_DATABASE_CREATE_SCHEMA="true" +EXISTING_DB_EFFECTIVE_PORT="" DATABASE_SCHEMA="" DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_DATABASE_SCHEMA" ]]; then @@ -1431,6 +1460,57 @@ if [[ "$DB_MODE" == "2" ]]; then DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" + echo + echo "=== Existing database port and setup ===" + echo "Leave port blank to use the engine default (3306 MySQL, 5432 PostgreSQL)." + DEFAULT_EXISTING_DB_PORT="" + [[ -n "$PREV_EXISTING_DATABASE_PORT" ]] && DEFAULT_EXISTING_DB_PORT="$PREV_EXISTING_DATABASE_PORT" + if [[ -n "$ENV_EXISTING_DATABASE_PORT" ]]; then + echo "Using ExistingDatabasePort from environment variable EXISTING_DATABASE_PORT." 
+ EXISTING_DATABASE_PORT="$ENV_EXISTING_DATABASE_PORT" + else + EXISTING_DATABASE_PORT="$(prompt_default "ExistingDatabasePort (optional)" "$DEFAULT_EXISTING_DB_PORT")" + fi + if [[ "$DATABASE_ENGINE" == "mysql" && "$EXISTING_DATABASE_PORT" == "3306" ]]; then + EXISTING_DATABASE_PORT="" + fi + if [[ "$DATABASE_ENGINE" == "postgresql" && "$EXISTING_DATABASE_PORT" == "5432" ]]; then + EXISTING_DATABASE_PORT="" + fi + EXISTING_DB_EFFECTIVE_PORT="3306" + [[ "$DATABASE_ENGINE" == "postgresql" ]] && EXISTING_DB_EFFECTIVE_PORT="5432" + [[ -n "$EXISTING_DATABASE_PORT" ]] && EXISTING_DB_EFFECTIVE_PORT="$EXISTING_DATABASE_PORT" + + CREATE_APP_DEFAULT="y" + [[ "${PREV_EXISTING_DATABASE_CREATE_APP_USER:-}" == "false" ]] && CREATE_APP_DEFAULT="n" + if [[ -n "$ENV_EXISTING_DATABASE_CREATE_APP_USER" ]]; then + echo "Using ExistingDatabaseCreateAppUser from environment variable EXISTING_DATABASE_CREATE_APP_USER." + EXISTING_DATABASE_CREATE_APP_USER="$ENV_EXISTING_DATABASE_CREATE_APP_USER" + if [[ "$EXISTING_DATABASE_CREATE_APP_USER" != "true" && "$EXISTING_DATABASE_CREATE_APP_USER" != "false" ]]; then + echo "Error: EXISTING_DATABASE_CREATE_APP_USER must be true or false." >&2 + exit 1 + fi + elif prompt_yes_no "Create dedicated app DB user (CREATE USER / grants)?" "$CREATE_APP_DEFAULT"; then + EXISTING_DATABASE_CREATE_APP_USER="true" + else + EXISTING_DATABASE_CREATE_APP_USER="false" + fi + + CREATE_SCHEMA_DEFAULT="y" + [[ "${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-}" == "false" ]] && CREATE_SCHEMA_DEFAULT="n" + if [[ -n "$ENV_EXISTING_DATABASE_CREATE_SCHEMA" ]]; then + echo "Using ExistingDatabaseCreateSchema from environment variable EXISTING_DATABASE_CREATE_SCHEMA." + EXISTING_DATABASE_CREATE_SCHEMA="$ENV_EXISTING_DATABASE_CREATE_SCHEMA" + if [[ "$EXISTING_DATABASE_CREATE_SCHEMA" != "true" && "$EXISTING_DATABASE_CREATE_SCHEMA" != "false" ]]; then + echo "Error: EXISTING_DATABASE_CREATE_SCHEMA must be true or false." 
>&2 + exit 1 + fi + elif prompt_yes_no "Run CREATE DATABASE IF NOT EXISTS for DatabaseSchema?" "$CREATE_SCHEMA_DEFAULT"; then + EXISTING_DATABASE_CREATE_SCHEMA="true" + else + EXISTING_DATABASE_CREATE_SCHEMA="false" + fi + if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then echo "Error: valid ExistingDatabaseHost is required for existing DB mode." >&2 exit 1 @@ -1514,7 +1594,8 @@ if [[ "$DB_MODE" == "2" ]]; then "$EXISTING_DATABASE_LAMBDA_SG_ID" \ "$DETECTED_VPC" \ "$DETECTED_SGS" \ - "$EXISTING_DATABASE_HOST"; then + "$EXISTING_DATABASE_HOST" \ + "$EXISTING_DB_EFFECTIVE_PORT"; then echo "Fix network settings and rerun deploy." >&2 exit 1 fi @@ -1605,6 +1686,9 @@ if [[ "$DB_MODE" == "2" ]]; then echo "DB subnets: $EXISTING_DATABASE_SUBNET_IDS_CSV" echo "Lambda SG: $EXISTING_DATABASE_LAMBDA_SG_ID" fi + echo "DB port: ${EXISTING_DB_EFFECTIVE_PORT:-engine default}" + echo "DB create user: $EXISTING_DATABASE_CREATE_APP_USER" + echo "DB create schema: $EXISTING_DATABASE_CREATE_SCHEMA" echo "DB schema: $DATABASE_SCHEMA" else echo "DB mode: create new RDS" @@ -1680,6 +1764,11 @@ if [[ "$DB_MODE" == "2" ]]; then "ExistingDatabaseLambdaSecurityGroupId=$EXISTING_DATABASE_LAMBDA_SG_ID" ) fi + [[ -n "$EXISTING_DATABASE_PORT" ]] && PARAMS+=("ExistingDatabasePort=$EXISTING_DATABASE_PORT") + PARAMS+=( + "ExistingDatabaseCreateAppUser=$EXISTING_DATABASE_CREATE_APP_USER" + "ExistingDatabaseCreateSchema=$EXISTING_DATABASE_CREATE_SCHEMA" + ) else # Clear existing-host parameters on updates to avoid stale previous values. # SAM rejects Key= (empty value) in shorthand; use ParameterKey=K,ParameterValue= instead. 
@@ -1690,6 +1779,9 @@ else "ExistingDatabaseNetworkMode=public" "ParameterKey=ExistingDatabaseSubnetIdsCsv,ParameterValue=" "ParameterKey=ExistingDatabaseLambdaSecurityGroupId,ParameterValue=" + "ParameterKey=ExistingDatabasePort,ParameterValue=" + "ExistingDatabaseCreateAppUser=true" + "ExistingDatabaseCreateSchema=true" ) fi @@ -1734,6 +1826,11 @@ else EXISTING_DATABASE_NETWORK_MODE="${PREV_EXISTING_DATABASE_NETWORK_MODE:-public}" EXISTING_DATABASE_SUBNET_IDS_CSV="${PREV_EXISTING_DATABASE_SUBNET_IDS_CSV:-}" EXISTING_DATABASE_LAMBDA_SG_ID="${PREV_EXISTING_DATABASE_LAMBDA_SG_ID:-}" + EXISTING_DATABASE_PORT="${PREV_EXISTING_DATABASE_PORT:-}" + EXISTING_DATABASE_CREATE_APP_USER="${PREV_EXISTING_DATABASE_CREATE_APP_USER:-true}" + EXISTING_DATABASE_CREATE_SCHEMA="${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-true}" + [[ -z "$EXISTING_DATABASE_CREATE_APP_USER" ]] && EXISTING_DATABASE_CREATE_APP_USER="true" + [[ -z "$EXISTING_DATABASE_CREATE_SCHEMA" ]] && EXISTING_DATABASE_CREATE_SCHEMA="true" SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" SLACK_CLIENT_SECRET="${SLACK_CLIENT_SECRET:-}" SLACK_CLIENT_ID="${SLACK_CLIENT_ID:-}" @@ -1780,7 +1877,10 @@ if [[ "$TASK_CICD" == "true" ]]; then "$EXISTING_DATABASE_NETWORK_MODE" \ "$EXISTING_DATABASE_SUBNET_IDS_CSV" \ "$EXISTING_DATABASE_LAMBDA_SG_ID" \ - "$DATABASE_ENGINE" + "$DATABASE_ENGINE" \ + "${EXISTING_DATABASE_PORT:-}" \ + "${EXISTING_DATABASE_CREATE_APP_USER:-true}" \ + "${EXISTING_DATABASE_CREATE_SCHEMA:-true}" fi if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index dc57999..43555cf 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -126,6 +126,33 @@ Parameters: Type: String Default: "" + ExistingDatabasePort: + Description: > + TCP port for ExistingDatabaseHost (DATABASE_PORT). Leave empty to use engine default (3306 MySQL, 5432 PostgreSQL). + Set for external providers with a non-standard port (e.g. 
TiDB Cloud 4000). Ignored when creating new RDS in stack. + Type: String + Default: "" + + ExistingDatabaseCreateAppUser: + Description: > + When ExistingDatabaseHost is set: "true" (default) creates a dedicated app user and grants schema access. + Set "false" for managed DBs that disallow CREATE USER; admin password is copied to the app DB secret and DATABASE_USER is the admin user. + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + + ExistingDatabaseCreateSchema: + Description: > + When ExistingDatabaseHost is set: "true" (default) runs CREATE DATABASE IF NOT EXISTS for DatabaseSchema. + Set "false" when the database already exists. Ignored when creating new RDS in stack. + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + DatabaseSchema: Description: > Database/schema name for MySQL or PostgreSQL. Each app sharing an RDS instance @@ -286,6 +313,9 @@ Conditions: UseExistingDatabasePrivateVpc: !And - !Condition UseExistingDatabase - !Equals [!Ref ExistingDatabaseNetworkMode, "private"] + UseExistingDatabaseWithCustomPort: !And + - !Condition UseExistingDatabase + - !Not [!Equals [!Ref ExistingDatabasePort, ""]] HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] HasExistingTokenEncryptionKeySecretArn: !Not [!Equals [!Ref ExistingTokenEncryptionKeySecretArn, ""]] HasAppDbPasswordOverride: !Not [!Equals [!Ref AppDbPasswordOverride, ""]] @@ -573,7 +603,9 @@ Resources: - Version: "2012-10-17" Statement: - Effect: Allow - Action: secretsmanager:GetSecretValue + Action: + - secretsmanager:GetSecretValue + - secretsmanager:PutSecretValue Resource: - !If - HasAppDbPasswordOverride @@ -626,6 +658,21 @@ Resources: - !Ref AppDbCredentialsSecretProvided - !Ref AppDbCredentialsSecretGenerated DatabaseEngine: !Ref DatabaseEngine + Port: !If + - UseExistingDatabaseWithCustomPort + - !Ref ExistingDatabasePort + - !If + - IsMysqlEngine + - "3306" + - "5432" + CreateAppUser: !If + - UseExistingDatabase + 
- !Ref ExistingDatabaseCreateAppUser + - "true" + CreateSchema: !If + - UseExistingDatabase + - !Ref ExistingDatabaseCreateSchema + - "true" # ============================================================ # Lambda Function @@ -688,9 +735,12 @@ Resources: SLACK_CLIENT_ID: !Ref SlackClientID DATABASE_BACKEND: !Ref DatabaseEngine DATABASE_PORT: !If - - IsMysqlEngine - - "3306" - - "5432" + - UseExistingDatabaseWithCustomPort + - !Ref ExistingDatabasePort + - !If + - IsMysqlEngine + - "3306" + - "5432" DATABASE_HOST: !If - UseExistingDatabase - !Ref ExistingDatabaseHost diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 8a34047..950da62 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -263,6 +263,14 @@ resource "google_cloud_run_v2_service" "syncbot" { location = var.region ingress = "INGRESS_TRAFFIC_ALL" + labels = merge( + {}, + var.use_existing_database ? { + syncbot_existing_db_create_app_user = var.existing_db_create_app_user ? "true" : "false" + syncbot_existing_db_create_schema = var.existing_db_create_schema ? "true" : "false" + } : {}, + ) + template { service_account = google_service_account.cloud_run.email diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 90af4ec..3ed82ad 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -569,6 +569,9 @@ fi if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then echo echo "=== Configuration ===" +DB_PORT="3306" +EXISTING_DB_CREATE_APP_USER="true" +EXISTING_DB_CREATE_SCHEMA="true" echo "=== Database Source ===" # USE_EXISTING=true: point Terraform at an external DB only (use_existing_database); skip creating Cloud SQL. # USE_EXISTING_DEFAULT: y/n default for the prompt when redeploying without a managed instance for this stage. @@ -611,6 +614,32 @@ if [[ "$USE_EXISTING" == "true" ]]; then echo "Error: Database user is required when using existing database mode." 
>&2 exit 1 fi + + echo + echo "=== Existing database port and setup (operator / external DB) ===" + DEFAULT_DB_PORT="3306" + if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_DB_PORT_EARLY="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PORT")" + [[ -n "$DETECTED_DB_PORT_EARLY" ]] && DEFAULT_DB_PORT="$DETECTED_DB_PORT_EARLY" + fi + DB_PORT="$(prompt_line "Database TCP port (DATABASE_PORT)" "$DEFAULT_DB_PORT")" + if [[ -z "$DB_PORT" ]]; then + echo "Error: Database TCP port is required when using existing database mode." >&2 + exit 1 + fi + + CREATE_APP_DEF="y" + CREATE_SCHEMA_DEF="y" + if prompt_yn "Create dedicated app DB user on the server (CREATE USER / grants)?" "$CREATE_APP_DEF"; then + EXISTING_DB_CREATE_APP_USER="true" + else + EXISTING_DB_CREATE_APP_USER="false" + fi + if prompt_yn "Run CREATE DATABASE IF NOT EXISTS for DatabaseSchema (you or a hook)?" "$CREATE_SCHEMA_DEF"; then + EXISTING_DB_CREATE_SCHEMA="true" + else + EXISTING_DB_CREATE_SCHEMA="false" + fi fi DETECTED_CLOUD_IMAGE="" @@ -652,7 +681,6 @@ ENABLE_DB_RESET_VAR="" DB_TLS_VAR="" DB_SSL_CA_VAR="" DB_BACKEND="mysql" -DB_PORT="3306" if [[ -n "$EXISTING_SERVICE_URL" ]]; then DETECTED_RA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "REQUIRE_ADMIN")" [[ -n "$DETECTED_RA" ]] && REQUIRE_ADMIN_DEFAULT="$DETECTED_RA" @@ -679,8 +707,10 @@ if [[ -n "$EXISTING_SERVICE_URL" ]]; then DB_SSL_CA_VAR="${DETECTED_DB_SSL_CA:-}" DETECTED_DB_BACKEND="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_BACKEND")" [[ -n "$DETECTED_DB_BACKEND" ]] && DB_BACKEND="$DETECTED_DB_BACKEND" - DETECTED_DB_PORT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PORT")" - [[ -n "$DETECTED_DB_PORT" ]] && DB_PORT="$DETECTED_DB_PORT" + if [[ "$USE_EXISTING" != "true" ]]; then + DETECTED_DB_PORT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PORT")" + [[ -n "$DETECTED_DB_PORT" ]] && DB_PORT="$DETECTED_DB_PORT" + fi fi 
echo @@ -726,6 +756,8 @@ if [[ "$USE_EXISTING" == "true" ]]; then VARS+=("-var=existing_db_host=$EXISTING_HOST") VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") VARS+=("-var=existing_db_user=$EXISTING_USER") + VARS+=("-var=existing_db_create_app_user=$EXISTING_DB_CREATE_APP_USER") + VARS+=("-var=existing_db_create_schema=$EXISTING_DB_CREATE_SCHEMA") else VARS+=("-var=use_existing_database=false") fi diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 9237f90..eca6786 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -48,6 +48,18 @@ variable "existing_db_user" { description = "Existing MySQL user (when use_existing_database = true)" } +variable "existing_db_create_app_user" { + type = bool + default = true + description = "When use_existing_database: operator note — whether a dedicated app DB user exists (no Cloud SQL user resource; app uses existing_db_user / secret)." +} + +variable "existing_db_create_schema" { + type = bool + default = true + description = "When use_existing_database: operator note — whether DatabaseSchema was created manually (Terraform does not create schema for existing host)." 
+} + # --------------------------------------------------------------------------- # Cloud Run # --------------------------------------------------------------------------- diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index 9bf794f..2db15a2 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -92,3 +92,81 @@ def test_handler_calls_postgresql_setup(cfn_create_event): mock_pg.assert_called_once() mock_mysql.assert_not_called() assert mock_send.call_args[0][2] == "SUCCESS" + + +def test_handler_custom_port_passed_to_tcp_and_mysql(cfn_create_event): + cfn_create_event["ResourceProperties"]["Port"] = "4000" + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable") as mock_tcp, + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + mock_tcp.assert_called_once_with("db.example.com", 4000) + assert mock_mysql.call_args[1]["port"] == 4000 + + +def test_handler_mysql_create_schema_false(cfn_create_event): + cfn_create_event["ResourceProperties"]["CreateSchema"] = "false" + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + assert mock_mysql.call_args[1]["create_schema"] is False + + +def test_handler_put_secret_when_no_app_user(cfn_create_event): + cfn_create_event["ResourceProperties"]["CreateAppUser"] = "false" + handler = _fresh_handler() + with ( + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value") as mock_get, + patch.object(handler, 
"_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, + patch.object(handler, "put_secret_string") as mock_put, + ): + handler._handler_impl(cfn_create_event, MagicMock()) + mock_get.assert_not_called() + mock_mysql.assert_called_once() + assert mock_mysql.call_args[1]["create_app_user"] is False + mock_pg.assert_not_called() + mock_put.assert_called_once_with( + cfn_create_event["ResourceProperties"]["SecretArn"], + "adminpw", + ) + assert mock_send.call_args[0][2] == "SUCCESS" + assert mock_send.call_args[0][3] == {"Username": "admin"} + + +def test_handler_skip_both_no_db_client(cfn_create_event): + cfn_create_event["ResourceProperties"]["CreateAppUser"] = "false" + cfn_create_event["ResourceProperties"]["CreateSchema"] = "false" + handler = _fresh_handler() + with ( + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value") as mock_get, + patch.object(handler, "_assert_tcp_reachable") as mock_tcp, + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, + patch.object(handler, "put_secret_string") as mock_put, + ): + handler._handler_impl(cfn_create_event, MagicMock()) + mock_get.assert_not_called() + mock_mysql.assert_not_called() + mock_pg.assert_not_called() + mock_tcp.assert_called_once_with("db.example.com", 3306) + mock_put.assert_called_once_with( + cfn_create_event["ResourceProperties"]["SecretArn"], + "adminpw", + ) + assert mock_send.call_args[0][2] == "SUCCESS" + assert mock_send.call_args[0][3] == {"Username": "admin"} From d6f856fb6b8047e14028240430811a6b1f2449c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Mar 2026 19:08:51 +0000 Subject: [PATCH 03/19] Bump actions/checkout from 4 to 6 Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 6. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v6) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/ci.yml | 6 +++--- .github/workflows/deploy-aws.yml | 2 +- .github/workflows/deploy-gcp.yml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9824f5a..7867fdf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,7 @@ jobs: permissions: contents: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: ref: ${{ github.head_ref || github.ref_name }} repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} @@ -51,7 +51,7 @@ jobs: sam-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - uses: aws-actions/setup-sam@v2 with: use-installer: true @@ -63,7 +63,7 @@ jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - uses: actions/setup-python@v5 with: python-version: "3.12" diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index fab46d5..187373b 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -23,7 +23,7 @@ jobs: if: vars.DEPLOY_TARGET != 'gcp' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - uses: actions/setup-python@v5 with: python-version: '3.12' diff --git a/.github/workflows/deploy-gcp.yml b/.github/workflows/deploy-gcp.yml index 0415265..172fc48 100644 --- a/.github/workflows/deploy-gcp.yml +++ b/.github/workflows/deploy-gcp.yml @@ -25,7 +25,7 @@ jobs: if: vars.DEPLOY_TARGET == 'gcp' runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 # Workload Identity Federation: authenticate without a key file # - uses: google-github-actions/auth@v2 From 3149dd27a9138f184d8cfd1e9e3827027272b4e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Mar 2026 19:08:51 +0000 Subject: [PATCH 04/19] Bump actions/download-artifact from 4 to 8 --- .github/workflows/deploy-aws.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 187373b..e3fdb51 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -67,7 +67,7 @@ jobs: aws-region: ${{ vars.AWS_REGION }} - name: Download artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: build-artifact path: './.aws-sam/build' @@ -129,7 +129,7 @@ jobs: aws-region: ${{ vars.AWS_REGION }} - name: Download artifact - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: build-artifact path: './.aws-sam/build' From f98d2eb409f7a05a16c9c64c8f4069136bf38203 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 19:23:02 -0500 Subject: [PATCH 05/19] Bump actions/setup-python from 5 to 6 --- .github/workflows/ci.yml | 4 ++-- .github/workflows/deploy-aws.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7867fdf..8415e13 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: ref: ${{ github.head_ref || github.ref_name }} repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} fetch-depth: 0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install Poetry and export plugin @@ -64,7 +64,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: 
actions/checkout@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: "3.12" - name: Install dependencies diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index e3fdb51..6ac7490 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: '3.12' - uses: aws-actions/setup-sam@v2 From 40f95ebb5f3bc0aabfde0d076d4ef7b6f8e133b0 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 19:23:48 -0500 Subject: [PATCH 06/19] Bump actions/upload-artifact from 4 to 7 --- .github/workflows/deploy-aws.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 6ac7490..84b470a 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -50,7 +50,7 @@ jobs: pip-audit -r infra/aws/db_setup/requirements.txt - name: Publish artifact - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v7 with: name: build-artifact path: './.aws-sam/build' From 4835cb5b9f91df205f991bf5e24f193957d6b8c4 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 19:24:53 -0500 Subject: [PATCH 07/19] Bump aws-actions/configure-aws-credentials from 4 to 6 --- .github/workflows/deploy-aws.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 84b470a..a393136 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -36,7 +36,7 @@ jobs: sam validate -t infra/aws/template.yaml --lint sam validate -t infra/aws/template.bootstrap.yaml --lint - - uses: aws-actions/configure-aws-credentials@v4 + - uses: aws-actions/configure-aws-credentials@v6 with: role-to-assume: ${{ 
vars.AWS_ROLE_TO_ASSUME }} aws-region: ${{ vars.AWS_REGION }} @@ -61,7 +61,7 @@ jobs: environment: test needs: sam-build steps: - - uses: aws-actions/configure-aws-credentials@v4 + - uses: aws-actions/configure-aws-credentials@v6 with: role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} aws-region: ${{ vars.AWS_REGION }} @@ -123,7 +123,7 @@ jobs: environment: prod needs: sam-build steps: - - uses: aws-actions/configure-aws-credentials@v4 + - uses: aws-actions/configure-aws-credentials@v6 with: role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} aws-region: ${{ vars.AWS_REGION }} From 6e7d88176a90c88340316c8612c9a0e82b38f4a1 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 19:37:14 -0500 Subject: [PATCH 08/19] Bump python from 3.12-slim to 3.14-slim --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index dc365f2..3baf6ba 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.12-slim +FROM python:3.14-slim WORKDIR /app From 2ec53b718a0fa598a162bcbd27ad6f5a9852db16 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 27 Mar 2026 22:07:59 -0500 Subject: [PATCH 09/19] Update Python code to work with 3.14. 
--- .github/workflows/ci.yml | 4 +- .github/workflows/deploy-aws.yml | 2 +- .gitignore | 1 + CHANGELOG.md | 12 ++ docs/DEPLOYMENT.md | 2 +- docs/DEVELOPMENT.md | 2 +- docs/INFRA_CONTRACT.md | 2 +- infra/aws/db_setup/handler.py | 19 +-- infra/aws/template.yaml | 4 +- infra/gcp/tests/test_terraform_validate.py | 3 +- poetry.lock | 10 +- pyproject.toml | 4 +- syncbot/app.py | 36 ++--- syncbot/builders/user_mapping.py | 4 +- syncbot/constants.py | 1 + syncbot/db/__init__.py | 30 +--- syncbot/db/alembic/versions/001_baseline.py | 1 + syncbot/federation/api.py | 5 +- syncbot/federation/core.py | 17 +- syncbot/handlers/_common.py | 4 +- syncbot/handlers/channel_sync.py | 12 +- syncbot/handlers/export_import.py | 8 +- syncbot/handlers/federation_cmds.py | 3 +- syncbot/handlers/group_manage.py | 5 +- syncbot/handlers/groups.py | 14 +- syncbot/handlers/messages.py | 5 +- syncbot/handlers/sync.py | 10 +- syncbot/handlers/users.py | 2 +- syncbot/helpers/core.py | 2 +- syncbot/helpers/export_import.py | 165 ++++++++++++-------- syncbot/helpers/files.py | 39 +++-- syncbot/helpers/notifications.py | 2 +- syncbot/helpers/oauth.py | 6 +- syncbot/helpers/refresh.py | 3 +- syncbot/helpers/user_matching.py | 6 +- syncbot/helpers/workspace.py | 3 +- syncbot/logger.py | 8 +- syncbot/requirements.txt | 38 ++--- syncbot/slack/deferred_ack_views.py | 14 +- tests/test_federation_reactions.py | 8 +- tests/test_groups_handlers.py | 6 +- tests/test_helpers.py | 8 +- tests/test_message_event_dedup.py | 5 +- tests/test_split_message_reactions.py | 4 +- 44 files changed, 283 insertions(+), 256 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8415e13..ae46bd6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v6 with: - python-version: "3.12" + python-version: "3.14" - name: Install Poetry and export plugin run: | python -m pip install --upgrade pip @@ -66,7 +66,7 @@ jobs: - 
uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: - python-version: "3.12" + python-version: "3.14" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index a393136..51c5484 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: - python-version: '3.12' + python-version: '3.14' - uses: aws-actions/setup-sam@v2 with: use-installer: true diff --git a/.gitignore b/.gitignore index 77c5db2..f538a04 100644 --- a/.gitignore +++ b/.gitignore @@ -124,6 +124,7 @@ celerybeat.pid # Environments .env .venv +.venv-py314 env/ venv/ ENV/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 0068f3f..c7068bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,18 @@ All notable changes to this project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Changed + +- Bumped GitHub Actions: `actions/checkout` v6, `actions/setup-python` v6, `actions/upload-artifact` v7, `actions/download-artifact` v8, `aws-actions/configure-aws-credentials` v6 +- Python runtime baseline from 3.12 to 3.14 (Docker base image, AWS Lambda, CI) +- Ruff `target-version` set to Python 3.14 (import order and annotation cleanups applied repo-wide) + +### Fixed + +- Replaced deprecated `datetime.utcnow()` with `datetime.now(UTC)` in backup/migration export helpers + ## [1.0.1] - 2026-03-26 ### Changed diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index cf02d2e..d5a0d7b 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -2,7 +2,7 @@ This guide explains **what the guided deploy scripts do**, how to perform the **same steps manually** on **AWS** or **GCP**, and how **GitHub Actions** fits in. 
For the runtime environment variables the app expects in any cloud, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). -**Runtime baseline:** Python 3.12 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. +**Runtime baseline:** Python 3.14 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. --- diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md index b2affa4..8d3105f 100644 --- a/docs/DEVELOPMENT.md +++ b/docs/DEVELOPMENT.md @@ -37,7 +37,7 @@ App on port **3000**; restart the `app` service after code changes. ### Native Python -**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). +**Needs:** Python 3.14+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). ## Configuration reference diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 216dabe..6c6df23 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -12,7 +12,7 @@ The application reads configuration from environment variables. Providers must i ## Toolchain Baseline -- Runtime baseline: **Python 3.12**. +- Runtime baseline: **Python 3.14**. - Keep runtime/tooling aligned across: - Lambda/Cloud Run runtime configuration - CI Python version diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index f72a328..042c953 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -6,11 +6,11 @@ - admin password fetched from an admin secret ARN (new-RDS mode). 
""" +import base64 import json import re -import base64 -import time import socket +import time import boto3 import psycopg2 @@ -66,9 +66,7 @@ def handler(event, context): try: send(event, context, "FAILED", reason=f"Unhandled error: {e}") except Exception as send_err: - raise RuntimeError( - f"Unhandled error in handler: {e}; failed to notify CloudFormation: {send_err}" - ) from e + raise RuntimeError(f"Unhandled error in handler: {e}; failed to notify CloudFormation: {send_err}") from e raise @@ -169,9 +167,7 @@ def _assert_tcp_reachable(host: str, port: int) -> None: time.sleep(DB_CONNECT_RETRY_SECONDS) finally: sock.close() - raise RuntimeError( - f"Cannot reach {host}:{port} over TCP after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" - ) + raise RuntimeError(f"Cannot reach {host}:{port} over TCP after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}") def get_secret_value(secret_arn: str, json_key: str | None = None) -> str: @@ -228,9 +224,7 @@ def setup_database_mysql( last_exc = exc time.sleep(DB_CONNECT_RETRY_SECONDS) if conn is None: - raise RuntimeError( - f"MySQL connect failed after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" - ) + raise RuntimeError(f"MySQL connect failed after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}") try: with conn.cursor() as cur: cur.execute(f"CREATE DATABASE IF NOT EXISTS `{safe_schema}`") @@ -328,6 +322,5 @@ def setup_database_postgresql( last_exc = exc time.sleep(db_connect_retry_seconds) raise RuntimeError( - f"Failed connecting to newly created database '{schema}' after " - f"{max_db_connect_attempts} attempts: {last_exc}" + f"Failed connecting to newly created database '{schema}' after {max_db_connect_attempts} attempts: {last_exc}" ) diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index dc57999..bdee1e3 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -563,7 +563,7 @@ Resources: Properties: CodeUri: db_setup/ Handler: handler.handler - Runtime: python3.12 + Runtime: python3.14 Architectures: - 
x86_64 Timeout: 60 @@ -641,7 +641,7 @@ Resources: Properties: CodeUri: ../../syncbot/ Handler: app.handler - Runtime: python3.12 + Runtime: python3.14 Architectures: - x86_64 Timeout: 30 diff --git a/infra/gcp/tests/test_terraform_validate.py b/infra/gcp/tests/test_terraform_validate.py index 515f01a..8a02516 100644 --- a/infra/gcp/tests/test_terraform_validate.py +++ b/infra/gcp/tests/test_terraform_validate.py @@ -39,8 +39,7 @@ def test_terraform_validates() -> None: ) if init.returncode != 0: pytest.skip( - "terraform init failed (terraform missing or no network for providers?):\n" - f"{init.stdout}\n{init.stderr}" + f"terraform init failed (terraform missing or no network for providers?):\n{init.stdout}\n{init.stderr}" ) validate = subprocess.run( [tf, "validate"], diff --git a/poetry.lock b/poetry.lock index 7ea3e3b..91a674b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand. 
[[package]] name = "alembic" @@ -857,10 +857,10 @@ files = [ ] [package.dependencies] -botocore = ">=1.37.4,<2.0a.0" +botocore = ">=1.37.4,<2.0a0" [package.extras] -crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] +crt = ["botocore[crt] (>=1.37.4,<2.0a0)"] [[package]] name = "six" @@ -1038,5 +1038,5 @@ zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [metadata] lock-version = "2.1" -python-versions = "^3.12" -content-hash = "1a838a06c2d452cf5cd57fa44cb4c195fc2fa1e0c75bce56b8b53312b2366ebb" +python-versions = "^3.14" +content-hash = "8214131496230362e52452512cdb3392d665fcf32e33914a9b7724e5df17309a" diff --git a/pyproject.toml b/pyproject.toml index aef914d..fbe8459 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ readme = "README.md" poetry-plugin-export = ">=1.8" [tool.poetry.dependencies] -python = "^3.12" +python = "^3.14" alembic = "^1.13" python-dotenv = "^1.2.0" slack-bolt = "^1.27.0" @@ -29,7 +29,7 @@ testpaths = ["tests", "infra/aws/tests", "infra/gcp/tests"] pythonpath = ["syncbot", "infra/aws/db_setup"] [tool.ruff] -target-version = "py312" +target-version = "py314" line-length = 120 src = ["syncbot", "tests", "infra/aws/tests", "infra/gcp/tests"] diff --git a/syncbot/app.py b/syncbot/app.py index b0d234a..c007f1f 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -56,10 +56,17 @@ ) from routing import MAIN_MAPPER, VIEW_ACK_MAPPER, VIEW_MAPPER -_SENSITIVE_KEYS = frozenset({ - "token", "bot_token", "access_token", "shared_secret", - "public_key", "private_key", "private_key_encrypted", -}) +_SENSITIVE_KEYS = frozenset( + { + "token", + "bot_token", + "access_token", + "shared_secret", + "public_key", + "private_key", + "private_key_encrypted", + } +) def _redact_sensitive(obj, _depth=0): @@ -67,10 +74,7 @@ def _redact_sensitive(obj, _depth=0): if _depth > 10: return obj if isinstance(obj, dict): - return { - k: "[REDACTED]" if k in _SENSITIVE_KEYS else _redact_sensitive(v, _depth + 1) - for k, v in obj.items() - } + return {k: 
"[REDACTED]" if k in _SENSITIVE_KEYS else _redact_sensitive(v, _depth + 1) for k, v in obj.items()} if isinstance(obj, list): return [_redact_sensitive(v, _depth + 1) for v in obj] return obj @@ -98,7 +102,7 @@ def _capture_slack_retry_num(req, resp, next): try: v = vals[0] if isinstance(vals, (list, tuple)) else vals req.context["slack_retry_num"] = int(v) - except (ValueError, TypeError, IndexError): + except ValueError, TypeError, IndexError: pass return next() @@ -224,11 +228,7 @@ def main_response(body: dict, logger, client, ack, context: dict) -> None: ) raise else: - if not ( - request_type == "view_submission" - and request_id in VIEW_ACK_MAPPER - and request_id not in VIEW_MAPPER - ): + if not (request_type == "view_submission" and request_id in VIEW_ACK_MAPPER and request_id not in VIEW_MAPPER): _logger.error( "no_handler", extra={ @@ -364,7 +364,7 @@ def do_POST(self) -> None: return try: content_len = int(self.headers.get("Content-Length") or 0) - except (TypeError, ValueError): + except TypeError, ValueError: content_len = 0 query = self.path.partition("?")[2] request_body = self.rfile.read(content_len).decode("utf-8") @@ -382,13 +382,11 @@ def _handle_federation(self, method: str) -> None: int(self.headers.get("Content-Length", 0)), _fed_max_body, ) - except (TypeError, ValueError): + except TypeError, ValueError: content_len = 0 body_str = self.rfile.read(content_len).decode() if content_len else "" headers = {k: v for k, v in self.headers.items()} - status, resp = dispatch_federation_request( - method, self._path_no_query(), body_str, headers - ) + status, resp = dispatch_federation_request(method, self._path_no_query(), body_str, headers) self._send_raw( status, {"Content-Type": ["application/json"]}, diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py index f45ecfc..677f9d1 100644 --- a/syncbot/builders/user_mapping.py +++ b/syncbot/builders/user_mapping.py @@ -257,14 +257,14 @@ def build_user_mapping_edit_modal( 
mapping_id_str = action_id.replace(actions.CONFIG_USER_MAPPING_EDIT + "_", "") try: mapping_id = int(mapping_id_str) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"build_user_mapping_edit_modal: invalid mapping_id: {mapping_id_str}") return raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" try: group_id = int(raw_group) - except (TypeError, ValueError): + except (TypeError, ValueError): group_id = 0 mapping = DbManager.get_record(UserMapping, id=mapping_id) diff --git a/syncbot/constants.py b/syncbot/constants.py index 5c670a0..31cfaea 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -101,6 +101,7 @@ def _has_real_bot_token() -> bool: # handles any requests. Fails fast in production; warns in local dev. # --------------------------------------------------------------------------- + def get_database_backend() -> str: """Return ``postgresql``, ``mysql``, or ``sqlite``. diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 63fb4df..69cafef 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -48,9 +48,7 @@ class DatabaseField: # Repo root locally; Lambda deployment root (/var/task) in AWS — used for relative SQLite paths.
_syncbot_dir = Path(__file__).resolve().parent.parent -_PROJECT_ROOT = ( - _syncbot_dir if os.environ.get("AWS_LAMBDA_FUNCTION_NAME") else _syncbot_dir.parent -) +_PROJECT_ROOT = _syncbot_dir if os.environ.get("AWS_LAMBDA_FUNCTION_NAME") else _syncbot_dir.parent def _mysql_port() -> str: @@ -76,7 +74,7 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: if ca_path: try: ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): + except (OSError, ssl.SSLError): ssl_ctx = ssl.create_default_context() else: ssl_ctx = ssl.create_default_context() @@ -114,7 +112,7 @@ def _network_sql_connect_args_from_url() -> dict: if ca_path: try: ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): + except (OSError, ssl.SSLError): ssl_ctx = ssl.create_default_context() else: ssl_ctx = ssl.create_default_context() @@ -202,6 +200,7 @@ def _ensure_database_exists() -> None: def _alembic_config(): """Build Alembic config with script_location set to syncbot/db/alembic.""" from alembic.config import Config  # pyright: ignore[reportMissingImports] + config = Config() config.set_main_option("script_location", str(_ALEMBIC_SCRIPT_LOCATION)) return config @@ -253,22 +252,14 @@ def _drop_all_tables_dialect_aware(engine) -> None: if engine.dialect.name == "postgresql": with engine.begin() as conn: result = conn.execute( - text( - "SELECT tablename FROM pg_tables " - "WHERE schemaname = 'public' ORDER BY tablename" - ) + text("SELECT tablename FROM pg_tables WHERE schemaname = 'public' ORDER BY tablename") ) for (table_name,) in result: conn.execute(text(f'DROP TABLE IF EXISTS "{table_name}" CASCADE')) return with engine.begin() as conn: conn.execute(text("SET FOREIGN_KEY_CHECKS = 0")) - result = conn.execute( - text( - "SELECT TABLE_NAME FROM information_schema.TABLES " - "WHERE TABLE_SCHEMA = DATABASE()" - ) - ) + result = conn.execute(text("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_SCHEMA =
DATABASE()")) for (table_name,) in result: conn.execute(text(f"DROP TABLE IF EXISTS `{table_name}`")) conn.execute(text("SET FOREIGN_KEY_CHECKS = 1")) @@ -283,9 +274,7 @@ def drop_and_init_db() -> None: """ global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA - _logger.critical( - "DB RESET: emptying schema and reinitializing via Alembic. All data will be lost." - ) + _logger.critical("DB RESET: emptying schema and reinitializing via Alembic. All data will be lost.") db_url, connect_args = _get_database_url_and_args() engine = create_engine( @@ -316,9 +305,7 @@ def get_engine(echo: bool = False, schema: str = None): backend = constants.get_database_backend() target_schema = ( - (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) - if backend in ("mysql", "postgresql") - else "" + (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) if backend in ("mysql", "postgresql") else "" ) cache_key = target_schema or backend @@ -565,4 +552,3 @@ def delete_records(cls: T, filters, schema=None): raise finally: close_session(session) - diff --git a/syncbot/db/alembic/versions/001_baseline.py b/syncbot/db/alembic/versions/001_baseline.py index eeec36e..69f853f 100644 --- a/syncbot/db/alembic/versions/001_baseline.py +++ b/syncbot/db/alembic/versions/001_baseline.py @@ -5,6 +5,7 @@ Create Date: Baseline from ORM models + OAuth tables """ + from collections.abc import Sequence import sqlalchemy as sa diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py index 91fd017..d63b232 100644 --- a/syncbot/federation/api.py +++ b/syncbot/federation/api.py @@ -45,6 +45,7 @@ def _find_post_records(post_id: str, sync_channel_id: int) -> list[schemas.PostM [schemas.PostMeta.post_id == pid, schemas.PostMeta.sync_channel_id == sync_channel_id], ) + _PAIRING_CODE_RE = re.compile(r"^FED-[0-9A-Fa-f]{8}$") _FIELD_MAX_LENGTHS = { @@ -83,7 +84,9 @@ def _validate_fields(body: dict, required: list[str], extras: list[str] | None = return None -def 
_pick_user_mapping_for_federated_target(source_user_id: str, target_workspace_id: int) -> schemas.UserMapping | None: +def _pick_user_mapping_for_federated_target( + source_user_id: str, target_workspace_id: int +) -> schemas.UserMapping | None: maps = DbManager.find_records( schemas.UserMapping, [ diff --git a/syncbot/federation/core.py b/syncbot/federation/core.py index 99800f4..a5bb800 100644 --- a/syncbot/federation/core.py +++ b/syncbot/federation/core.py @@ -98,12 +98,8 @@ def get_or_create_instance_keypair(): return private_key, existing[0].public_key private_key = Ed25519PrivateKey.generate() - public_pem = private_key.public_key().public_bytes( - Encoding.PEM, PublicFormat.SubjectPublicKeyInfo - ).decode() - private_pem = private_key.private_bytes( - Encoding.PEM, PrivateFormat.PKCS8, NoEncryption() - ).decode() + public_pem = private_key.public_key().public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode() + private_pem = private_key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()).decode() record = schemas.InstanceKey( public_key=public_pem, @@ -144,7 +140,7 @@ def federation_verify(body: str, signature_b64: str, timestamp: str, public_key_ """ try: ts_int = int(timestamp) - except (TypeError, ValueError): + except (TypeError, ValueError): return False if abs(time.time() - ts_int) > _TIMESTAMP_MAX_AGE: @@ -156,7 +152,7 @@ signing_str = f"{timestamp}:{body}".encode() public_key.verify(base64.b64decode(signature_b64), signing_str) return True - except (InvalidSignature, ValueError, TypeError): + except (InvalidSignature, ValueError, TypeError): return False @@ -173,7 +169,7 @@ def verify_body(body: str, signature_b64: str, public_key_pem: str) -> bool: public_key = load_pem_public_key(public_key_pem.encode()) public_key.verify(base64.b64decode(signature_b64), body.encode()) return True - except (InvalidSignature, ValueError, TypeError): + except
(InvalidSignature, ValueError, TypeError): return False @@ -219,6 +215,7 @@ def validate_webhook_url(url: str) -> bool: return False import socket + try: addr_infos = socket.getaddrinfo(hostname, None) for info in addr_infos: @@ -230,7 +227,7 @@ extra={"url": url, "resolved_ip": str(addr)}, ) return False - except (socket.gaierror, ValueError): + except (socket.gaierror, ValueError): return False return True diff --git a/syncbot/handlers/_common.py b/syncbot/handlers/_common.py index bd43ce5..669d736 100644 --- a/syncbot/handlers/_common.py +++ b/syncbot/handlers/_common.py @@ -100,9 +100,7 @@ def _get_selected_conversation_or_option(body: dict, action_id: str) -> str | No """Return selected conversation ID, falling back to selected option value.""" for aid, action_data in _iter_view_state_actions(body): if aid == action_id: - return action_data.get("selected_conversation") or helpers.safe_get( - action_data, "selected_option", "value" - ) + return action_data.get("selected_conversation") or helpers.safe_get(action_data, "selected_option", "value") return None diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index d9fe8b4..9a04c65 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -143,7 +143,7 @@ def handle_publish_channel( raw_group_id = helpers.safe_get(body, "actions", 0, "value") try: group_id = int(raw_group_id) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"publish_channel: invalid group_id: {raw_group_id!r}") return @@ -386,7 +386,7 @@ def handle_unpublish_channel( raw_value = helpers.safe_get(body, "actions", 0, "value") try: sync_id = int(raw_value) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"Invalid sync_id for unpublish: {raw_value!r}") return @@ -452,7 +452,7 @@ def _toggle_sync_status( try: sync_id = int(sync_id_str) - except (TypeError, ValueError): + except
(TypeError, ValueError): _logger.warning(f"{log_event}_invalid_id", extra={"action_id": action_id}) return @@ -564,7 +564,7 @@ def handle_stop_sync( try: sync_id = int(sync_id_str) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning("stop_sync_invalid_id", extra={"action_id": action_id}) return @@ -708,9 +708,7 @@ def handle_subscribe_channel( channel_options = _get_publishable_channel_options(client, workspace_record.id) if not channel_options: channel_options = [ - orm.SelectorOption( - name="— No Channels available to Sync in this Workspace. —", value="__none__" - ), + orm.SelectorOption(name="— No Channels available to Sync in this Workspace. —", value="__none__"), ] blocks.append( orm.InputBlock( diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index 45148c0..2b186e3 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -772,7 +772,13 @@ def handle_data_migration_submit_work( if not constants.FEDERATION_ENABLED: return err, data, group_id, team_id_to_workspace_id, workspace_record = _data_migration_prepare(body, client, context) - if err is not None or data is None or group_id is None or team_id_to_workspace_id is None or workspace_record is None: + if ( + err is not None + or data is None + or group_id is None + or team_id_to_workspace_id is None + or workspace_record is None + ): return source = data.get("source_instance") diff --git a/syncbot/handlers/federation_cmds.py b/syncbot/handlers/federation_cmds.py index e0dd65d..b858047 100644 --- a/syncbot/handlers/federation_cmds.py +++ b/syncbot/handlers/federation_cmds.py @@ -336,7 +336,7 @@ def handle_remove_federation_connection( try: member_id = int(member_id_str) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning("remove_federation_connection_invalid_id", extra={"action_id": action_id}) return @@ -345,6 +345,7 @@ def handle_remove_federation_connection( return from datetime
import UTC, datetime + now = datetime.now(UTC) DbManager.update_records( schemas.WorkspaceGroupMember, diff --git a/syncbot/handlers/group_manage.py b/syncbot/handlers/group_manage.py index 1f279f4..51d486f 100644 --- a/syncbot/handlers/group_manage.py +++ b/syncbot/handlers/group_manage.py @@ -31,7 +31,7 @@ def handle_leave_group( try: group_id = int(group_id_str) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning("leave_group_invalid_id", extra={"action_id": action_id}) return @@ -48,7 +48,7 @@ def handle_leave_group( blocks=[ orm.SectionBlock( label=( - f":warning: *Are you sure you want to leave the group \"{group.name}\"?*\n\n" + f':warning: *Are you sure you want to leave the group "{group.name}"?*\n\n' "This will:\n" "\u2022 Stop all channel syncs you have in this group\n" "\u2022 Remove your synced message history from this group\n" @@ -162,6 +162,7 @@ def handle_leave_group_confirm( ) from datetime import UTC, datetime + now = datetime.now(UTC) for member in members: DbManager.update_records( diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index c9b2dca..e9450a1 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -35,8 +35,8 @@ def _generate_invite_code(length: int = 7) -> str: def _activate_group_membership( client: WebClient, - workspace_record: "schemas.Workspace", - group: "schemas.WorkspaceGroup", + workspace_record: schemas.Workspace, + group: schemas.WorkspaceGroup, ) -> None: """Refresh user directories and seed mappings for all existing group members.""" try: @@ -365,7 +365,7 @@ def handle_invite_workspace( raw_group_id = helpers.safe_get(body, "actions", 0, "value") try: group_id = int(raw_group_id) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"invite_workspace: invalid group_id: {raw_group_id!r}") return @@ -497,7 +497,7 @@ def handle_invite_workspace_submit( try: target_ws_id = int(selected_ws_id) - except (TypeError, ValueError): +
except (TypeError, ValueError): return target_ws = helpers.get_workspace_by_id(target_ws_id) @@ -597,7 +597,7 @@ def handle_accept_group_invite( raw_member_id = helpers.safe_get(body, "actions", 0, "value") try: member_id = int(raw_member_id) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"accept_group_invite: invalid member_id: {raw_member_id!r}") return @@ -680,7 +680,7 @@ def handle_decline_group_invite( raw_member_id = helpers.safe_get(body, "actions", 0, "value") try: member_id = int(raw_member_id) - except (TypeError, ValueError): + except (TypeError, ValueError): _logger.warning(f"decline_group_invite: invalid member_id: {raw_member_id!r}") return @@ -756,7 +756,7 @@ def _update_invite_dms( try: entries = _json.loads(member.dm_messages) - except (ValueError, TypeError): + except (ValueError, TypeError): _logger.warning("_update_invite_dms: invalid dm_messages JSON for member %s", member.id) return diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 00aca22..b108e57 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -23,6 +23,7 @@ def _find_source_workspace_id(records: list[tuple], channel_id: str, ws_index: i return workspace.id return None + _logger = logging.getLogger(__name__) @@ -770,14 +771,14 @@ def _should_skip_slack_event_retry(body: dict, context: dict) -> bool: try: if int(rn) >= 1: return True - except (TypeError, ValueError): + except (TypeError, ValueError): pass ra = helpers.safe_get(body, "retry_attempt") if ra is not None: try: if int(ra) >= 1: return True - except (TypeError, ValueError): + except (TypeError, ValueError): pass return False diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 4f8878a..332b3f8 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -35,7 +35,7 @@ def handle_remove_sync( raw_value = helpers.safe_get(body, "actions", 0, "value") try: sync_channel_id = int(raw_value) - except (TypeError,
ValueError): + except (TypeError, ValueError): _logger.warning(f"Invalid sync_channel_id value: {raw_value!r}") return @@ -119,12 +119,8 @@ def handle_refresh_home( cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) if action == "cooldown" and cached_blocks is not None and remaining is not None: - refresh_idx = helpers.index_of_block_with_action( - cached_blocks, actions.CONFIG_REFRESH_HOME - ) - blocks_with_message = helpers.inject_cooldown_message( - cached_blocks, refresh_idx, remaining - ) + refresh_idx = helpers.index_of_block_with_action(cached_blocks, actions.CONFIG_REFRESH_HOME) + blocks_with_message = helpers.inject_cooldown_message(cached_blocks, refresh_idx, remaining) client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message}) return if action == "cached" and cached_blocks is not None: diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py index 6e1c74f..e200c98 100644 --- a/syncbot/handlers/users.py +++ b/syncbot/handlers/users.py @@ -138,7 +138,7 @@ def handle_user_mapping_refresh( raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" try: group_id = int(raw_group) - except (TypeError, ValueError): + except (TypeError, ValueError): group_id = 0 gid_opt = group_id or None diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index d3e7dc7..a9f2ba1 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -24,7 +24,7 @@ def safe_get(data: Any, *keys: Any) -> Any: else: return None return result - except (KeyError, AttributeError, IndexError): + except (KeyError, AttributeError, IndexError): return None diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py index 43b5352..e4a696f 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -9,7 +9,7 @@ import json import logging import os -from datetime import datetime +from datetime import UTC, datetime from decimal import Decimal from typing import Any @@ -23,12
+23,14 @@ BACKUP_VERSION = 1 MIGRATION_VERSION = 1 _RAW_BACKUP_TABLES = ("slack_bots", "slack_installations", "slack_oauth_states") -_DATETIME_COLUMNS = frozenset({ - "bot_token_expires_at", - "user_token_expires_at", - "installed_at", - "expire_at", -}) +_DATETIME_COLUMNS = frozenset( + { + "bot_token_expires_at", + "user_token_expires_at", + "installed_at", + "expire_at", + } +) def _dump_raw_table(table_name: str) -> list[dict]: @@ -122,11 +124,12 @@ def _records_to_list(records: list, cls: type) -> list[dict]: # Full-instance backup # --------------------------------------------------------------------------- + def build_full_backup() -> dict: """Build full-instance backup payload (all tables, version, exported_at, encryption_key_hash, hmac).""" payload = { "version": BACKUP_VERSION, - "exported_at": datetime.utcnow().isoformat() + "Z", + "exported_at": datetime.now(UTC).isoformat() + "Z", "encryption_key_hash": _compute_encryption_key_hash(), } tables = [ @@ -243,9 +246,11 @@ def restore_full_backup( # Cache invalidation after restore/import # --------------------------------------------------------------------------- + def invalidate_home_tab_caches_for_team(team_id: str) -> None: """Clear home_tab_hash and home_tab_blocks for a team so next Refresh does full rebuild.""" from helpers._cache import _cache_delete_prefix + _cache_delete_prefix(f"home_tab_hash:{team_id}") _cache_delete_prefix(f"home_tab_blocks:{team_id}") @@ -259,6 +264,7 @@ def invalidate_home_tab_caches_for_all_teams(team_ids: list[str]) -> None: def invalidate_sync_list_cache_for_channel(channel_id: str) -> None: """Clear get_sync_list cache for a channel.""" from helpers._cache import _cache_delete + _cache_delete(f"sync_list:{channel_id}") @@ -266,6 +272,7 @@ def invalidate_sync_list_cache_for_channel(channel_id: str) -> None: # Data migration export (workspace-scoped) # --------------------------------------------------------------------------- + def build_migration_export(workspace_id: 
int, include_source_instance: bool = True) -> dict: """Build workspace-scoped migration JSON. Optionally sign with Ed25519 and include source_instance.""" workspace = DbManager.get_record(schemas.Workspace, workspace_id) @@ -317,27 +324,33 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr tw = DbManager.get_record(schemas.Workspace, sync.target_workspace_id) if tw: tgt_team = tw.team_id - syncs_data.append({ - "title": sync.title, - "sync_mode": sync.sync_mode or "group", - "publisher_team_id": pub_team, - "target_team_id": tgt_team, - "is_publisher": sync.publisher_workspace_id == workspace_id, - }) + syncs_data.append( + { + "title": sync.title, + "sync_mode": sync.sync_mode or "group", + "publisher_team_id": pub_team, + "target_team_id": tgt_team, + "is_publisher": sync.publisher_workspace_id == workspace_id, + } + ) for sync_channel in sync_channels_w: if sync_channel.sync_id != sync_id: continue - sync_channels_data.append({ - "sync_title": sync.title, - "channel_id": sync_channel.channel_id, - "status": sync_channel.status or "active", - }) + sync_channels_data.append( + { + "sync_title": sync.title, + "channel_id": sync_channel.channel_id, + "status": sync_channel.status or "active", + } + ) key = f"{sync.title}:{sync_channel.channel_id}" post_metas = DbManager.find_records( schemas.PostMeta, [schemas.PostMeta.sync_channel_id == sync_channel.id], ) - post_meta_by_key[key] = [{"post_id": post_meta.post_id, "ts": float(post_meta.ts)} for post_meta in post_metas] + post_meta_by_key[key] = [ + {"post_id": post_meta.post_id, "ts": float(post_meta.ts)} for post_meta in post_metas + ] # user_directory for W ud_records = DbManager.find_records( @@ -349,37 +362,42 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr ) user_directory_data = [] for u in ud_records: - user_directory_data.append({ - "slack_user_id": u.slack_user_id, - "email": u.email, - "real_name": u.real_name, - "display_name": 
u.display_name, - "normalized_name": u.normalized_name, - "updated_at": u.updated_at.isoformat() if u.updated_at else None, - }) + user_directory_data.append( + { + "slack_user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + "normalized_name": u.normalized_name, + "updated_at": u.updated_at.isoformat() if u.updated_at else None, + } + ) # user_mappings involving W (export with team_id for other side) um_records = DbManager.find_records( schemas.UserMapping, [ - (schemas.UserMapping.source_workspace_id == workspace_id) | (schemas.UserMapping.target_workspace_id == workspace_id), + (schemas.UserMapping.source_workspace_id == workspace_id) + | (schemas.UserMapping.target_workspace_id == workspace_id), ], ) user_mappings_data = [] for um in um_records: src_ws = DbManager.get_record(schemas.Workspace, um.source_workspace_id) if um.source_workspace_id else None tgt_ws = DbManager.get_record(schemas.Workspace, um.target_workspace_id) if um.target_workspace_id else None - user_mappings_data.append({ - "source_team_id": src_ws.team_id if src_ws else None, - "target_team_id": tgt_ws.team_id if tgt_ws else None, - "source_user_id": um.source_user_id, - "target_user_id": um.target_user_id, - "match_method": um.match_method, - }) + user_mappings_data.append( + { + "source_team_id": src_ws.team_id if src_ws else None, + "target_team_id": tgt_ws.team_id if tgt_ws else None, + "source_user_id": um.source_user_id, + "target_user_id": um.target_user_id, + "match_method": um.match_method, + } + ) payload = { "version": MIGRATION_VERSION, - "exported_at": datetime.utcnow().isoformat() + "Z", + "exported_at": datetime.now(UTC).isoformat() + "Z", "workspace": {"team_id": team_id, "workspace_name": workspace_name}, "groups": groups_data, "syncs": syncs_data, @@ -391,11 +409,14 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr if include_source_instance: from federation import core as federation + 
try: url = federation.get_public_url() instance_id = federation.get_instance_id() _, public_key_pem = federation.get_or_create_instance_keypair() - code = federation.generate_federation_code(webhook_url=url, instance_id=instance_id, public_key=public_key_pem) + code = federation.generate_federation_code( + webhook_url=url, instance_id=instance_id, public_key=public_key_pem + ) payload["source_instance"] = { "webhook_url": url, "instance_id": instance_id, @@ -408,7 +429,8 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr # Sign with Ed25519 (exclude signature from signed bytes; include signed_at) try: from federation import core as federation - payload["signed_at"] = datetime.utcnow().isoformat() + "Z" + + payload["signed_at"] = datetime.now(UTC).isoformat() + "Z" to_sign = {k: v for k, v in payload.items() if k != "signature"} raw = canonical_json_dumps(to_sign).decode("utf-8") payload["signature"] = federation.sign_body(raw) @@ -430,6 +452,7 @@ def verify_migration_signature(data: dict) -> bool: to_verify = {k: v for k, v in data.items() if k != "signature"} raw = canonical_json_dumps(to_verify).decode("utf-8") from federation import core as federation + return federation.verify_body(raw, sig, public_key) @@ -445,8 +468,6 @@ def import_migration_data( - Replace mode: soft-delete W's SyncChannels in this group and their PostMeta, then create from export. - team_id_to_workspace_id: map export team_id -> B's workspace id (for publisher/target and user_mappings). 
""" - from datetime import UTC - syncs_export = data.get("syncs", []) sync_channels_export = data.get("sync_channels", []) post_meta_export = data.get("post_meta", {}) @@ -496,7 +517,11 @@ def import_migration_data( tgt_team = s.get("target_team_id") is_publisher = s.get("is_publisher") pub_ws_id = (workspace_id if is_publisher else team_id_to_workspace_id.get(pub_team)) if pub_team else None - tgt_ws_id = (workspace_id if tgt_team == export_team_id else team_id_to_workspace_id.get(tgt_team)) if tgt_team else None + tgt_ws_id = ( + (workspace_id if tgt_team == export_team_id else team_id_to_workspace_id.get(tgt_team)) + if tgt_team + else None + ) new_sync = schemas.Sync( title=title, group_id=group_id, @@ -525,11 +550,13 @@ def import_migration_data( DbManager.create_record(new_sync_channel) key = f"{sync_title}:{channel_id}" for post_meta in post_meta_export.get(key, []): - DbManager.create_record(schemas.PostMeta( - post_id=post_meta["post_id"], - sync_channel_id=new_sync_channel.id, - ts=Decimal(str(post_meta["ts"])), - )) + DbManager.create_record( + schemas.PostMeta( + post_id=post_meta["post_id"], + sync_channel_id=new_sync_channel.id, + ts=Decimal(str(post_meta["ts"])), + ) + ) # user_directory for W (replace: remove existing for this workspace then insert) DbManager.delete_records( @@ -537,15 +564,19 @@ def import_migration_data( [schemas.UserDirectory.workspace_id == workspace_id], ) for u in user_directory_export: - DbManager.create_record(schemas.UserDirectory( - workspace_id=workspace_id, - slack_user_id=u["slack_user_id"], - email=u.get("email"), - real_name=u.get("real_name"), - display_name=u.get("display_name"), - normalized_name=u.get("normalized_name"), - updated_at=datetime.fromisoformat(u["updated_at"].replace("Z", "+00:00")) if u.get("updated_at") else datetime.now(UTC), - )) + DbManager.create_record( + schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=u["slack_user_id"], + email=u.get("email"), + 
real_name=u.get("real_name"), + display_name=u.get("display_name"), + normalized_name=u.get("normalized_name"), + updated_at=datetime.fromisoformat(u["updated_at"].replace("Z", "+00:00")) + if u.get("updated_at") + else datetime.now(UTC), + ) + ) # user_mappings where both source and target workspace exist on B for um in user_mappings_export: @@ -565,12 +596,14 @@ def import_migration_data( ) if existing: continue - DbManager.create_record(schemas.UserMapping( - source_workspace_id=src_ws_id, - source_user_id=um["source_user_id"], - target_workspace_id=tgt_ws_id, - target_user_id=um.get("target_user_id"), - match_method=um.get("match_method", "none"), - matched_at=datetime.now(UTC), - group_id=group_id, - )) + DbManager.create_record( + schemas.UserMapping( + source_workspace_id=src_ws_id, + source_user_id=um["source_user_id"], + target_workspace_id=tgt_ws_id, + target_user_id=um.get("target_user_id"), + match_method=um.get("match_method", "none"), + matched_at=datetime.now(UTC), + group_id=group_id, + ) + ) diff --git a/syncbot/helpers/files.py b/syncbot/helpers/files.py index 87c8931..ad4423b 100644 --- a/syncbot/helpers/files.py +++ b/syncbot/helpers/files.py @@ -87,9 +87,7 @@ def download_public_file(url: str, logger: Logger) -> dict | None: return None -def download_slack_files( - files: list[dict], client: WebClient, logger: Logger -) -> list[dict]: +def download_slack_files(files: list[dict], client: WebClient, logger: Logger) -> list[dict]: """Download files from Slack to /tmp for direct re-upload.""" downloaded: list[dict] = [] auth_headers = {"Authorization": f"Bearer {client.token}"} @@ -106,11 +104,13 @@ def download_slack_files( _download_to_file(url, file_path, headers=auth_headers) - downloaded.append({ - "path": file_path, - "name": file_name, - "mimetype": f.get("mimetype", "application/octet-stream"), - }) + downloaded.append( + { + "path": file_path, + "name": file_name, + "mimetype": f.get("mimetype", "application/octet-stream"), + } + ) except 
Exception as e: logger.error(f"download_slack_files: failed for {f.get('id')}: {e}") return downloaded @@ -130,10 +130,12 @@ def upload_files_to_slack( slack_client = WebClient(bot_token) file_uploads = [] for f in files: - file_uploads.append({ - "file": f["path"], - "filename": f["name"], - }) + file_uploads.append( + { + "file": f["path"], + "filename": f["name"], + } + ) kwargs: dict = {"channel": channel_id} if initial_comment: @@ -158,7 +160,9 @@ def upload_files_to_slack( def _extract_file_message_ts( - client: WebClient, upload_response, channel_id: str, + client: WebClient, + upload_response, + channel_id: str, thread_ts: str | None = None, ) -> str | None: """Extract the message ts created by a file upload.""" @@ -174,7 +178,7 @@ def _extract_file_message_ts( files_list = upload_response["files"] if files_list and len(files_list) > 0: file_id = files_list[0]["id"] if isinstance(files_list[0], dict) else files_list[0].get("id") - except (KeyError, TypeError, IndexError): + except KeyError, TypeError, IndexError: pass if not file_id: @@ -189,10 +193,11 @@ def _extract_file_message_ts( channel_shares = shares.get(share_type, {}).get(channel_id, []) if channel_shares: ts = channel_shares[0].get("ts") - _logger.info("_extract_file_message_ts: success", - extra={"file_id": file_id, "ts": ts, "attempt": attempt}) + _logger.info( + "_extract_file_message_ts: success", extra={"file_id": file_id, "ts": ts, "attempt": attempt} + ) return ts - except (KeyError, TypeError, IndexError): + except KeyError, TypeError, IndexError: pass except Exception as e: _logger.warning(f"_extract_file_message_ts: files.info error (attempt {attempt}): {e}") diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py index 4dc472c..72cb4f4 100644 --- a/syncbot/helpers/notifications.py +++ b/syncbot/helpers/notifications.py @@ -128,7 +128,7 @@ def save_dm_messages_to_group_member(member_id: int, dm_entries: list[dict]) -> return try: prev = 
_json.loads(existing.dm_messages) if existing.dm_messages else [] - except (ValueError, TypeError): + except ValueError, TypeError: prev = [] prev.extend(dm_entries) DbManager.update_records( diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py index 9ecedc3..70ca03f 100644 --- a/syncbot/helpers/oauth.py +++ b/syncbot/helpers/oauth.py @@ -50,11 +50,7 @@ def get_oauth_flow(): ) bot_scopes = [s.strip() for s in scopes_raw.split(",") if s.strip()] - user_scopes = ( - [s.strip() for s in user_scopes_raw.split(",") if s.strip()] - if user_scopes_raw - else list(USER_SCOPES) - ) + user_scopes = [s.strip() for s in user_scopes_raw.split(",") if s.strip()] if user_scopes_raw else list(USER_SCOPES) return OAuthFlow( settings=OAuthSettings( diff --git a/syncbot/helpers/refresh.py b/syncbot/helpers/refresh.py index f500c69..60b46a5 100644 --- a/syncbot/helpers/refresh.py +++ b/syncbot/helpers/refresh.py @@ -16,8 +16,7 @@ def cooldown_message_block(remaining_seconds: int) -> dict: """Return a Block Kit context block dict for the refresh cooldown message.""" text = ( - f"No new data. Wait {remaining_seconds} second{'s' if remaining_seconds != 1 else ''} " - "before refreshing again." + f"No new data. Wait {remaining_seconds} second{'s' if remaining_seconds != 1 else ''} before refreshing again." 
) return { "type": "context", diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py index e6661a6..d250fb0 100644 --- a/syncbot/helpers/user_matching.py +++ b/syncbot/helpers/user_matching.py @@ -516,9 +516,7 @@ def find_synced_channel_in_target(source_channel_id: str, target_workspace_id: i return target_rows[0].channel_id -_ARCHIVE_LINK_PATTERN = re.compile( - r"]+)>" -) +_ARCHIVE_LINK_PATTERN = re.compile(r"]+)>") def _rewrite_slack_archive_links_to_native_channels(msg_text: str, target_workspace_id: int) -> str: @@ -557,7 +555,7 @@ def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: def resolve_channel_references( msg_text: str, source_client: WebClient | None, - source_workspace: "schemas.Workspace | None" = None, + source_workspace: schemas.Workspace | None = None, target_workspace_id: int | None = None, ) -> str: """Replace ``<#CHANNEL_ID>`` references with native local channels when synced, else archive URLs or fallbacks. diff --git a/syncbot/helpers/workspace.py b/syncbot/helpers/workspace.py index f64a234..13b7dcd 100644 --- a/syncbot/helpers/workspace.py +++ b/syncbot/helpers/workspace.py @@ -263,7 +263,8 @@ def _restore_workspace( ) syncs_in_group = DbManager.find_records( - schemas.Sync, [schemas.Sync.group_id == group_id], + schemas.Sync, + [schemas.Sync.group_id == group_id], ) other_channel_ids = [] for sync in syncs_in_group: diff --git a/syncbot/logger.py b/syncbot/logger.py index 7cd116e..c4bb215 100644 --- a/syncbot/logger.py +++ b/syncbot/logger.py @@ -130,10 +130,10 @@ class DevFormatter(logging.Formatter): _RESERVED = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__.keys()) _COLORS = { - "DEBUG": "\033[90m", # grey - "INFO": "\033[32m", # green - "WARNING": "\033[33m", # yellow - "ERROR": "\033[31m", # red + "DEBUG": "\033[90m", # grey + "INFO": "\033[32m", # green + "WARNING": "\033[33m", # yellow + "ERROR": "\033[31m", # red "CRITICAL": "\033[1;31m", # bold red } _RESET = 
"\033[0m" diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index 8d1ceb2..055a5c3 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,19 +1,19 @@ -alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" -certifi==2026.2.25 ; python_version >= "3.12" and python_version < "4.0" -cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" -charset-normalizer==3.4.6 ; python_version >= "3.12" and python_version < "4.0" -cryptography==46.0.6 ; python_version >= "3.12" and python_version < "4.0" -greenlet==3.3.2 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") -idna==3.11 ; python_version >= "3.12" and python_version < "4.0" -mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0" -markupsafe==3.0.3 ; python_version >= "3.12" and python_version < "4.0" -psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" -pycparser==3.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" -pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" -python-dotenv==1.2.2 ; python_version >= "3.12" and python_version < "4.0" -requests==2.33.0 ; python_version >= "3.12" and python_version < "4.0" -slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0" -slack-sdk==3.41.0 ; python_version >= "3.12" and python_version < "4.0" -sqlalchemy==2.0.48 ; python_version >= "3.12" and python_version < "4.0" -typing-extensions==4.15.0 ; python_version >= "3.12" and python_version < "4.0" -urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0" +alembic==1.18.4 ; python_version >= "3.14" and python_version < "4.0" 
+certifi==2026.2.25 ; python_version >= "3.14" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.14" and python_version < "4.0" and platform_python_implementation != "PyPy" +charset-normalizer==3.4.6 ; python_version >= "3.14" and python_version < "4.0" +cryptography==46.0.6 ; python_version >= "3.14" and python_version < "4.0" +greenlet==3.3.2 ; python_version >= "3.14" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") +idna==3.11 ; python_version >= "3.14" and python_version < "4.0" +mako==1.3.10 ; python_version >= "3.14" and python_version < "4.0" +markupsafe==3.0.3 ; python_version >= "3.14" and python_version < "4.0" +psycopg2-binary==2.9.11 ; python_version >= "3.14" and python_version < "4.0" +pycparser==3.0 ; python_version >= "3.14" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" +pymysql==1.1.2 ; python_version >= "3.14" and python_version < "4.0" +python-dotenv==1.2.2 ; python_version >= "3.14" and python_version < "4.0" +requests==2.33.0 ; python_version >= "3.14" and python_version < "4.0" +slack-bolt==1.27.0 ; python_version >= "3.14" and python_version < "4.0" +slack-sdk==3.41.0 ; python_version >= "3.14" and python_version < "4.0" +sqlalchemy==2.0.48 ; python_version >= "3.14" and python_version < "4.0" +typing-extensions==4.15.0 ; python_version >= "3.14" and python_version < "4.0" +urllib3==2.6.3 ; python_version >= "3.14" and python_version < "4.0" diff --git a/syncbot/slack/deferred_ack_views.py b/syncbot/slack/deferred_ack_views.py index 3ffb144..d647d33 100644 --- a/syncbot/slack/deferred_ack_views.py +++ b/syncbot/slack/deferred_ack_views.py @@ -10,9 +10,11 @@ CONFIG_PUBLISH_MODE_SUBMIT, ) -DEFERRED_ACK_VIEW_CALLBACK_IDS = frozenset({ - CONFIG_PUBLISH_MODE_SUBMIT, - 
CONFIG_PUBLISH_CHANNEL_SUBMIT, - CONFIG_BACKUP_RESTORE_SUBMIT, - CONFIG_DATA_MIGRATION_SUBMIT, -}) +DEFERRED_ACK_VIEW_CALLBACK_IDS = frozenset( + { + CONFIG_PUBLISH_MODE_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, + } +) diff --git a/tests/test_federation_reactions.py b/tests/test_federation_reactions.py index ea6bfb4..fe8044a 100644 --- a/tests/test_federation_reactions.py +++ b/tests/test_federation_reactions.py @@ -60,7 +60,9 @@ def test_invalid_name_reaction_falls_back_to_thread_text(self): patch.object(federation_api, "_find_post_records", return_value=[post_meta]), patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), patch.object(federation_api, "WebClient", return_value=ws_client), - patch.object(federation_api.helpers, "post_message", return_value={"ts": "200.000001"}) as post_message_mock, + patch.object( + federation_api.helpers, "post_message", return_value={"ts": "200.000001"} + ) as post_message_mock, ): status, resp = federation_api.handle_message_react(body, fed_ws) @@ -201,7 +203,9 @@ def test_missing_user_fields_use_defaults(self): patch.object(federation_api, "_find_post_records", return_value=[post_meta]), patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), patch.object(federation_api, "WebClient", return_value=ws_client), - patch.object(federation_api.helpers, "post_message", return_value={"ts": "200.000001"}) as post_message_mock, + patch.object( + federation_api.helpers, "post_message", return_value={"ts": "200.000001"} + ) as post_message_mock, ): status, resp = federation_api.handle_message_react(body, fed_ws) diff --git a/tests/test_groups_handlers.py b/tests/test_groups_handlers.py index e02e289..da25291 100644 --- a/tests/test_groups_handlers.py +++ b/tests/test_groups_handlers.py @@ -35,11 +35,7 @@ def test_invalid_group_code_log_is_sanitized(self): ): handle_join_group_submit(body, client, logger, context={}) - 
matched = [ - call - for call in warn_log.call_args_list - if call.args and call.args[0] == "group_code_invalid" - ] + matched = [call for call in warn_log.call_args_list if call.args and call.args[0] == "group_code_invalid"] assert matched, "Expected group_code_invalid warning" extra = matched[0].kwargs["extra"] assert "code" not in extra diff --git a/tests/test_helpers.py b/tests/test_helpers.py index b81b937..adce72e 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -325,9 +325,7 @@ def conv_info(channel): client.conversations_info.side_effect = conv_info client.team_info.return_value = {"team": {"domain": "acme"}} ws = self._make_workspace(team_id="T123", name="Acme") - result = helpers.resolve_channel_references( - "see <#CABC111> and <#CABC222>", client, ws - ) + result = helpers.resolve_channel_references("see <#CABC111> and <#CABC222>", client, ws) assert "archives/CABC111" in result assert "archives/CABC222" in result assert "#alpha" in result @@ -344,9 +342,7 @@ def test_native_channel_when_synced_to_target(self, mock_find): mock_find.return_value = "C_LOCAL_TARGET" client = self._make_client(channel_name="general", domain="acme") ws = self._make_workspace(team_id="T123", name="Acme") - result = helpers.resolve_channel_references( - "see <#CSOURCE123>", client, ws, target_workspace_id=42 - ) + result = helpers.resolve_channel_references("see <#CSOURCE123>", client, ws, target_workspace_id=42) assert result == "see <#C_LOCAL_TARGET>" mock_find.assert_called_with("CSOURCE123", 42) assert "slack.com" not in result diff --git a/tests/test_message_event_dedup.py b/tests/test_message_event_dedup.py index 94d5894..45baf2d 100644 --- a/tests/test_message_event_dedup.py +++ b/tests/test_message_event_dedup.py @@ -86,7 +86,10 @@ def test_file_share_subtype_still_calls_new_post(self): with ( patch("handlers.messages._is_own_bot_message", return_value=False), patch("handlers.messages._handle_new_post") as mock_new, - 
patch("handlers.messages._build_file_context", return_value=([], [], [{"path": "/tmp/x", "name": "x.jpg", "mimetype": "image/jpeg"}])), + patch( + "handlers.messages._build_file_context", + return_value=([], [], [{"path": "/tmp/x", "name": "x.jpg", "mimetype": "image/jpeg"}]), + ), ): respond_to_message_event(body, client, logger, context) diff --git a/tests/test_split_message_reactions.py b/tests/test_split_message_reactions.py index 44a21ff..ea903c4 100644 --- a/tests/test_split_message_reactions.py +++ b/tests/test_split_message_reactions.py @@ -40,7 +40,9 @@ def capture_post_meta(rows): created.extend(rows) with ( - patch("handlers.messages.helpers.get_sync_list", return_value=[(sc_source, ws_source), (sc_target, ws_target)]), + patch( + "handlers.messages.helpers.get_sync_list", return_value=[(sc_source, ws_source), (sc_target, ws_target)] + ), patch("handlers.messages.helpers.get_user_info", return_value=("N", "http://i")), patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None), patch("handlers.messages.helpers.get_federated_workspace_for_sync", return_value=None), From b61603547c6b8456496a26fbf6f92f80011c7a57 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 10:33:59 -0500 Subject: [PATCH 10/19] Revert to Python 3.12 code floor. 
--- .github/dependabot.yml | 3 +++ .github/workflows/ci.yml | 4 +-- .github/workflows/deploy-aws.yml | 2 +- .gitignore | 1 - CHANGELOG.md | 3 +-- Dockerfile | 2 +- docs/DEPLOYMENT.md | 2 +- docs/DEVELOPMENT.md | 2 +- docs/INFRA_CONTRACT.md | 2 +- infra/aws/template.yaml | 4 +-- poetry.lock | 4 +-- pyproject.toml | 4 +-- syncbot/app.py | 6 ++--- syncbot/builders/user_mapping.py | 4 +-- syncbot/db/__init__.py | 4 +-- syncbot/federation/core.py | 8 +++--- syncbot/handlers/channel_sync.py | 8 +++--- syncbot/handlers/federation_cmds.py | 2 +- syncbot/handlers/group_manage.py | 2 +- syncbot/handlers/groups.py | 14 +++++------ syncbot/handlers/messages.py | 4 +-- syncbot/handlers/sync.py | 2 +- syncbot/handlers/users.py | 2 +- syncbot/helpers/core.py | 2 +- syncbot/helpers/files.py | 4 +-- syncbot/helpers/notifications.py | 2 +- syncbot/helpers/user_matching.py | 2 +- syncbot/requirements.txt | 38 ++++++++++++++--------------- 28 files changed, 69 insertions(+), 68 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e644f37..47592e3 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -26,3 +26,6 @@ updates: schedule: interval: "weekly" open-pull-requests-limit: 10 + ignore: + - dependency-name: "python" + update-types: ["version-update:semver-major"] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae46bd6..8415e13 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v6 with: - python-version: "3.14" + python-version: "3.12" - name: Install Poetry and export plugin run: | python -m pip install --upgrade pip @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: - python-version: "3.14" + python-version: "3.12" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 51c5484..a393136 100644 
--- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: - python-version: '3.14' + python-version: '3.12' - uses: aws-actions/setup-sam@v2 with: use-installer: true diff --git a/.gitignore b/.gitignore index f538a04..77c5db2 100644 --- a/.gitignore +++ b/.gitignore @@ -124,7 +124,6 @@ celerybeat.pid # Environments .env .venv -.venv-py314 env/ venv/ ENV/ diff --git a/CHANGELOG.md b/CHANGELOG.md index c7068bc..337a81b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Bumped GitHub Actions: `actions/checkout` v6, `actions/setup-python` v6, `actions/upload-artifact` v7, `actions/download-artifact` v8, `aws-actions/configure-aws-credentials` v6 -- Python runtime baseline from 3.12 to 3.14 (Docker base image, AWS Lambda, CI) -- Ruff `target-version` set to Python 3.14 (import order and annotation cleanups applied repo-wide) +- Dependabot: ignore semver-major updates for the Docker `python` image (keeps base image on Python 3.12.x line) ### Fixed diff --git a/Dockerfile b/Dockerfile index 3baf6ba..dc365f2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.14-slim +FROM python:3.12-slim WORKDIR /app diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index d5a0d7b..cf02d2e 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -2,7 +2,7 @@ This guide explains **what the guided deploy scripts do**, how to perform the **same steps manually** on **AWS** or **GCP**, and how **GitHub Actions** fits in. For the runtime environment variables the app expects in any cloud, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). -**Runtime baseline:** Python 3.14 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. 
+**Runtime baseline:** Python 3.12 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. --- diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md index 8d3105f..b2affa4 100644 --- a/docs/DEVELOPMENT.md +++ b/docs/DEVELOPMENT.md @@ -37,7 +37,7 @@ App on port **3000**; restart the `app` service after code changes. ### Native Python -**Needs:** Python 3.14+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). +**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). ## Configuration reference diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 6c6df23..216dabe 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -12,7 +12,7 @@ The application reads configuration from environment variables. Providers must i ## Toolchain Baseline -- Runtime baseline: **Python 3.14**. +- Runtime baseline: **Python 3.12**. 
- Keep runtime/tooling aligned across: - Lambda/Cloud Run runtime configuration - CI Python version diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index bdee1e3..dc57999 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -563,7 +563,7 @@ Resources: Properties: CodeUri: db_setup/ Handler: handler.handler - Runtime: python3.14 + Runtime: python3.12 Architectures: - x86_64 Timeout: 60 @@ -641,7 +641,7 @@ Resources: Properties: CodeUri: ../../syncbot/ Handler: app.handler - Runtime: python3.14 + Runtime: python3.12 Architectures: - x86_64 Timeout: 30 diff --git a/poetry.lock b/poetry.lock index 91a674b..c06148d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1038,5 +1038,5 @@ zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [metadata] lock-version = "2.1" -python-versions = "^3.14" -content-hash = "8214131496230362e52452512cdb3392d665fcf32e33914a9b7724e5df17309a" +python-versions = "^3.12" +content-hash = "1a838a06c2d452cf5cd57fa44cb4c195fc2fa1e0c75bce56b8b53312b2366ebb" diff --git a/pyproject.toml b/pyproject.toml index fbe8459..aef914d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ readme = "README.md" poetry-plugin-export = ">=1.8" [tool.poetry.dependencies] -python = "^3.14" +python = "^3.12" alembic = "^1.13" python-dotenv = "^1.2.0" slack-bolt = "^1.27.0" @@ -29,7 +29,7 @@ testpaths = ["tests", "infra/aws/tests", "infra/gcp/tests"] pythonpath = ["syncbot", "infra/aws/db_setup"] [tool.ruff] -target-version = "py314" +target-version = "py312" line-length = 120 src = ["syncbot", "tests", "infra/aws/tests", "infra/gcp/tests"] diff --git a/syncbot/app.py b/syncbot/app.py index c007f1f..14edfbf 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -102,7 +102,7 @@ def _capture_slack_retry_num(req, resp, next): try: v = vals[0] if isinstance(vals, (list, tuple)) else vals req.context["slack_retry_num"] = int(v) - except ValueError, TypeError, IndexError: + except (ValueError, TypeError, IndexError): pass 
return next() @@ -364,7 +364,7 @@ def do_POST(self) -> None: return try: content_len = int(self.headers.get("Content-Length") or 0) - except TypeError, ValueError: + except (TypeError, ValueError): content_len = 0 query = self.path.partition("?")[2] request_body = self.rfile.read(content_len).decode("utf-8") @@ -382,7 +382,7 @@ def _handle_federation(self, method: str) -> None: int(self.headers.get("Content-Length", 0)), _fed_max_body, ) - except TypeError, ValueError: + except (TypeError, ValueError): content_len = 0 body_str = self.rfile.read(content_len).decode() if content_len else "" headers = {k: v for k, v in self.headers.items()} diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py index 677f9d1..f45ecfc 100644 --- a/syncbot/builders/user_mapping.py +++ b/syncbot/builders/user_mapping.py @@ -257,14 +257,14 @@ def build_user_mapping_edit_modal( mapping_id_str = action_id.replace(actions.CONFIG_USER_MAPPING_EDIT + "_", "") try: mapping_id = int(mapping_id_str) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"build_user_mapping_edit_modal: invalid mapping_id: {mapping_id_str}") return raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" try: group_id = int(raw_group) - except TypeError, ValueError: + except (TypeError, ValueError): group_id = 0 mapping = DbManager.get_record(UserMapping, id=mapping_id) diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 69cafef..90f9b67 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -74,7 +74,7 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: if ca_path: try: ssl_ctx = ssl.create_default_context(cafile=ca_path) - except OSError, ssl.SSLError: + except (OSError, ssl.SSLError): ssl_ctx = ssl.create_default_context() else: ssl_ctx = ssl.create_default_context() @@ -112,7 +112,7 @@ def _network_sql_connect_args_from_url() -> dict: if ca_path: try: ssl_ctx = 
ssl.create_default_context(cafile=ca_path) - except OSError, ssl.SSLError: + except (OSError, ssl.SSLError): ssl_ctx = ssl.create_default_context() else: ssl_ctx = ssl.create_default_context() diff --git a/syncbot/federation/core.py b/syncbot/federation/core.py index a5bb800..f275e2e 100644 --- a/syncbot/federation/core.py +++ b/syncbot/federation/core.py @@ -140,7 +140,7 @@ def federation_verify(body: str, signature_b64: str, timestamp: str, public_key_ """ try: ts_int = int(timestamp) - except TypeError, ValueError: + except (TypeError, ValueError): return False if abs(time.time() - ts_int) > _TIMESTAMP_MAX_AGE: @@ -152,7 +152,7 @@ def federation_verify(body: str, signature_b64: str, timestamp: str, public_key_ signing_str = f"{timestamp}:{body}".encode() public_key.verify(base64.b64decode(signature_b64), signing_str) return True - except InvalidSignature, ValueError, TypeError: + except (InvalidSignature, ValueError, TypeError): return False @@ -169,7 +169,7 @@ def verify_body(body: str, signature_b64: str, public_key_pem: str) -> bool: public_key = load_pem_public_key(public_key_pem.encode()) public_key.verify(base64.b64decode(signature_b64), body.encode()) return True - except InvalidSignature, ValueError, TypeError: + except (InvalidSignature, ValueError, TypeError): return False @@ -227,7 +227,7 @@ def validate_webhook_url(url: str) -> bool: extra={"url": url, "resolved_ip": str(addr)}, ) return False - except socket.gaierror, ValueError: + except (socket.gaierror, ValueError): return False return True diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 9a04c65..d12f4a8 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -143,7 +143,7 @@ def handle_publish_channel( raw_group_id = helpers.safe_get(body, "actions", 0, "value") try: group_id = int(raw_group_id) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"publish_channel: invalid group_id: 
{raw_group_id!r}") return @@ -386,7 +386,7 @@ def handle_unpublish_channel( raw_value = helpers.safe_get(body, "actions", 0, "value") try: sync_id = int(raw_value) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"Invalid sync_id for unpublish: {raw_value!r}") return @@ -452,7 +452,7 @@ def _toggle_sync_status( try: sync_id = int(sync_id_str) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"{log_event}_invalid_id", extra={"action_id": action_id}) return @@ -564,7 +564,7 @@ def handle_stop_sync( try: sync_id = int(sync_id_str) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning("stop_sync_invalid_id", extra={"action_id": action_id}) return diff --git a/syncbot/handlers/federation_cmds.py b/syncbot/handlers/federation_cmds.py index b858047..8c50f0e 100644 --- a/syncbot/handlers/federation_cmds.py +++ b/syncbot/handlers/federation_cmds.py @@ -336,7 +336,7 @@ def handle_remove_federation_connection( try: member_id = int(member_id_str) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning("remove_federation_connection_invalid_id", extra={"action_id": action_id}) return diff --git a/syncbot/handlers/group_manage.py b/syncbot/handlers/group_manage.py index 51d486f..6a8af46 100644 --- a/syncbot/handlers/group_manage.py +++ b/syncbot/handlers/group_manage.py @@ -31,7 +31,7 @@ def handle_leave_group( try: group_id = int(group_id_str) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning("leave_group_invalid_id", extra={"action_id": action_id}) return diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index e9450a1..c9b2dca 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -35,8 +35,8 @@ def _generate_invite_code(length: int = 7) -> str: def _activate_group_membership( client: WebClient, - workspace_record: schemas.Workspace, - group: schemas.WorkspaceGroup, + workspace_record: 
"schemas.Workspace", + group: "schemas.WorkspaceGroup", ) -> None: """Refresh user directories and seed mappings for all existing group members.""" try: @@ -365,7 +365,7 @@ def handle_invite_workspace( raw_group_id = helpers.safe_get(body, "actions", 0, "value") try: group_id = int(raw_group_id) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"invite_workspace: invalid group_id: {raw_group_id!r}") return @@ -497,7 +497,7 @@ def handle_invite_workspace_submit( try: target_ws_id = int(selected_ws_id) - except TypeError, ValueError: + except (TypeError, ValueError): return target_ws = helpers.get_workspace_by_id(target_ws_id) @@ -597,7 +597,7 @@ def handle_accept_group_invite( raw_member_id = helpers.safe_get(body, "actions", 0, "value") try: member_id = int(raw_member_id) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"accept_group_invite: invalid member_id: {raw_member_id!r}") return @@ -680,7 +680,7 @@ def handle_decline_group_invite( raw_member_id = helpers.safe_get(body, "actions", 0, "value") try: member_id = int(raw_member_id) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"decline_group_invite: invalid member_id: {raw_member_id!r}") return @@ -756,7 +756,7 @@ def _update_invite_dms( try: entries = _json.loads(member.dm_messages) - except ValueError, TypeError: + except (ValueError, TypeError): _logger.warning("_update_invite_dms: invalid dm_messages JSON for member %s", member.id) return diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index b108e57..bfb8784 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -771,14 +771,14 @@ def _should_skip_slack_event_retry(body: dict, context: dict) -> bool: try: if int(rn) >= 1: return True - except TypeError, ValueError: + except (TypeError, ValueError): pass ra = helpers.safe_get(body, "retry_attempt") if ra is not None: try: if int(ra) >= 1: return True - 
except TypeError, ValueError: + except (TypeError, ValueError): pass return False diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 332b3f8..2ae0982 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -35,7 +35,7 @@ def handle_remove_sync( raw_value = helpers.safe_get(body, "actions", 0, "value") try: sync_channel_id = int(raw_value) - except TypeError, ValueError: + except (TypeError, ValueError): _logger.warning(f"Invalid sync_channel_id value: {raw_value!r}") return diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py index e200c98..6e1c74f 100644 --- a/syncbot/handlers/users.py +++ b/syncbot/handlers/users.py @@ -138,7 +138,7 @@ def handle_user_mapping_refresh( raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" try: group_id = int(raw_group) - except TypeError, ValueError: + except (TypeError, ValueError): group_id = 0 gid_opt = group_id or None diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index a9f2ba1..d3e7dc7 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -24,7 +24,7 @@ def safe_get(data: Any, *keys: Any) -> Any: else: return None return result - except KeyError, AttributeError, IndexError: + except (KeyError, AttributeError, IndexError): return None diff --git a/syncbot/helpers/files.py b/syncbot/helpers/files.py index ad4423b..7518342 100644 --- a/syncbot/helpers/files.py +++ b/syncbot/helpers/files.py @@ -178,7 +178,7 @@ def _extract_file_message_ts( files_list = upload_response["files"] if files_list and len(files_list) > 0: file_id = files_list[0]["id"] if isinstance(files_list[0], dict) else files_list[0].get("id") - except KeyError, TypeError, IndexError: + except (KeyError, TypeError, IndexError): pass if not file_id: @@ -197,7 +197,7 @@ def _extract_file_message_ts( "_extract_file_message_ts: success", extra={"file_id": file_id, "ts": ts, "attempt": attempt} ) return ts - except KeyError, TypeError, IndexError: + except (KeyError, TypeError, 
IndexError): pass except Exception as e: _logger.warning(f"_extract_file_message_ts: files.info error (attempt {attempt}): {e}") diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py index 72cb4f4..4dc472c 100644 --- a/syncbot/helpers/notifications.py +++ b/syncbot/helpers/notifications.py @@ -128,7 +128,7 @@ def save_dm_messages_to_group_member(member_id: int, dm_entries: list[dict]) -> return try: prev = _json.loads(existing.dm_messages) if existing.dm_messages else [] - except ValueError, TypeError: + except (ValueError, TypeError): prev = [] prev.extend(dm_entries) DbManager.update_records( diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py index d250fb0..da3cace 100644 --- a/syncbot/helpers/user_matching.py +++ b/syncbot/helpers/user_matching.py @@ -555,7 +555,7 @@ def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: def resolve_channel_references( msg_text: str, source_client: WebClient | None, - source_workspace: schemas.Workspace | None = None, + source_workspace: "schemas.Workspace | None" = None, target_workspace_id: int | None = None, ) -> str: """Replace ``<#CHANNEL_ID>`` references with native local channels when synced, else archive URLs or fallbacks. 
diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index 055a5c3..8d1ceb2 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,19 +1,19 @@ -alembic==1.18.4 ; python_version >= "3.14" and python_version < "4.0" -certifi==2026.2.25 ; python_version >= "3.14" and python_version < "4.0" -cffi==2.0.0 ; python_version >= "3.14" and python_version < "4.0" and platform_python_implementation != "PyPy" -charset-normalizer==3.4.6 ; python_version >= "3.14" and python_version < "4.0" -cryptography==46.0.6 ; python_version >= "3.14" and python_version < "4.0" -greenlet==3.3.2 ; python_version >= "3.14" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") -idna==3.11 ; python_version >= "3.14" and python_version < "4.0" -mako==1.3.10 ; python_version >= "3.14" and python_version < "4.0" -markupsafe==3.0.3 ; python_version >= "3.14" and python_version < "4.0" -psycopg2-binary==2.9.11 ; python_version >= "3.14" and python_version < "4.0" -pycparser==3.0 ; python_version >= "3.14" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" -pymysql==1.1.2 ; python_version >= "3.14" and python_version < "4.0" -python-dotenv==1.2.2 ; python_version >= "3.14" and python_version < "4.0" -requests==2.33.0 ; python_version >= "3.14" and python_version < "4.0" -slack-bolt==1.27.0 ; python_version >= "3.14" and python_version < "4.0" -slack-sdk==3.41.0 ; python_version >= "3.14" and python_version < "4.0" -sqlalchemy==2.0.48 ; python_version >= "3.14" and python_version < "4.0" -typing-extensions==4.15.0 ; python_version >= "3.14" and python_version < "4.0" -urllib3==2.6.3 ; python_version >= "3.14" and python_version < "4.0" +alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" 
+certifi==2026.2.25 ; python_version >= "3.12" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" +charset-normalizer==3.4.6 ; python_version >= "3.12" and python_version < "4.0" +cryptography==46.0.6 ; python_version >= "3.12" and python_version < "4.0" +greenlet==3.3.2 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") +idna==3.11 ; python_version >= "3.12" and python_version < "4.0" +mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0" +markupsafe==3.0.3 ; python_version >= "3.12" and python_version < "4.0" +psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" +pycparser==3.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" +pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" +python-dotenv==1.2.2 ; python_version >= "3.12" and python_version < "4.0" +requests==2.33.0 ; python_version >= "3.12" and python_version < "4.0" +slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0" +slack-sdk==3.41.0 ; python_version >= "3.12" and python_version < "4.0" +sqlalchemy==2.0.48 ; python_version >= "3.12" and python_version < "4.0" +typing-extensions==4.15.0 ; python_version >= "3.12" and python_version < "4.0" +urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0" From c0ce9cf58e826be3c953ff2b0ccf0ffd84dc4d82 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 11:03:55 -0500 Subject: [PATCH 11/19] Improve AWS Lambda app startup post-deploy. 
--- .github/workflows/deploy-aws.yml | 26 +++++++++++++++++++++++ CHANGELOG.md | 4 ++++ docs/ARCHITECTURE.md | 2 ++ docs/DEPLOYMENT.md | 18 ++++++++++++++-- docs/INFRA_CONTRACT.md | 4 ++-- infra/aws/scripts/deploy.sh | 15 ++++++++++++++ infra/aws/template.bootstrap.yaml | 1 + infra/aws/template.yaml | 2 +- syncbot/app.py | 23 ++++++++++++++++++++- tests/test_app_main_response.py | 34 +++++++++++++++++++++++++++++++ 10 files changed, 123 insertions(+), 6 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index a393136..13edc09 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -117,6 +117,19 @@ jobs: SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ $OVERRIDE_PARAM" + - name: Run migrations and warm up Lambda + run: | + FUNCTION_ARN=$(aws cloudformation describe-stacks \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --query "Stacks[0].Outputs[?OutputKey=='SyncBotFunctionArn'].OutputValue" \ + --output text) + aws lambda invoke \ + --function-name "$FUNCTION_ARN" \ + --payload '{"action":"migrate"}' \ + --cli-binary-format raw-in-base64-out \ + /tmp/migrate-response.json + cat /tmp/migrate-response.json + sam-deploy-prod: if: github.ref == 'refs/heads/prod' runs-on: ubuntu-latest @@ -178,3 +191,16 @@ jobs: SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ $OVERRIDE_PARAM" + + - name: Run migrations and warm up Lambda + run: | + FUNCTION_ARN=$(aws cloudformation describe-stacks \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --query "Stacks[0].Outputs[?OutputKey=='SyncBotFunctionArn'].OutputValue" \ + --output text) + aws lambda invoke \ + --function-name "$FUNCTION_ARN" \ + --payload '{"action":"migrate"}' \ + --cli-binary-format raw-in-base64-out \ + /tmp/migrate-response.json + cat /tmp/migrate-response.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 337a81b..24aea94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ 
-11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Bumped GitHub Actions: `actions/checkout` v6, `actions/setup-python` v6, `actions/upload-artifact` v7, `actions/download-artifact` v8, `aws-actions/configure-aws-credentials` v6 - Dependabot: ignore semver-major updates for the Docker `python` image (keeps base image on Python 3.12.x line) +- AWS Lambda: Alembic migrations run via a post-deploy `{"action":"migrate"}` invoke (GitHub Actions after `sam deploy`) instead of on every cold start, keeping Slack interaction acks under the 3s budget; Cloud Run and local dev still run migrations at startup +- AWS Lambda memory increased from 128 MB to 256 MB for faster cold starts +- EventBridge keep-warm ScheduleV2 invokes return a clean JSON response from `app.handler` instead of falling through to the Slack Bolt handler +- AWS bootstrap IAM deploy policy: added `lambda:InvokeFunction` on `syncbot-*` functions so CI and the guided deploy script can run the post-deploy migrate invoke (re-sync the bootstrap stack to pick this up) ### Fixed diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index a395926..dba2cc0 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -119,6 +119,8 @@ flowchart TB All infrastructure is defined in `infra/aws/template.yaml` (AWS SAM). Dashed lines indicate resources that are conditionally created — when `Existing*` parameters are set, those resources are skipped. +**Lambda cold start vs Slack acks:** The main function uses **256 MB** memory (faster init than 128 MB). Alembic migrations run only when the function is invoked with `{"action":"migrate"}` (post-deploy in CI), not on every cold start, so the first Slack interaction after deploy can ack within Slack’s time limit. EventBridge keep-warm ScheduleV2 invokes are handled in `app.handler` with a trivial JSON response instead of the Slack Bolt adapter. 
+ ## Security & Hardening | Layer | Protection | diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index cf02d2e..19ef032 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -90,7 +90,7 @@ See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and ou ## Database backends -The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. Schema changes are applied at startup via Alembic (`alembic upgrade head`). +The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. Schema changes use Alembic (`alembic upgrade head`). **AWS Lambda:** Applied after each deploy via a workflow step that invokes the function with `{"action":"migrate"}` (not on every cold start). **Cloud Run / local:** Applied at process startup before serving HTTP. - **AWS:** Choose engine in the deploy script or pass `DatabaseEngine=mysql` / `postgresql` to `sam deploy`. - **Contract:** [INFRA_CONTRACT.md](INFRA_CONTRACT.md) — `DATABASE_BACKEND`, `DATABASE_URL` or host/user/password/schema. @@ -152,6 +152,17 @@ Use **`sam deploy --guided`** the first time if you prefer prompts. For **existi **Token key:** The stack can auto-generate `TOKEN_ENCRYPTION_KEY` in Secrets Manager. Back it up after first deploy. Optional: `TokenEncryptionKeyOverride`, `ExistingTokenEncryptionKeySecretArn` for recovery. +**Post-deploy migrate (Lambda only):** After `sam deploy`, run Alembic and warm the function (same as CI): + +```bash +FUNCTION_ARN=$(aws cloudformation describe-stacks --stack-name syncbot-test \ + --query "Stacks[0].Outputs[?OutputKey=='SyncBotFunctionArn'].OutputValue" --output text) +aws lambda invoke --function-name "$FUNCTION_ARN" --payload '{"action":"migrate"}' \ + --cli-binary-format raw-in-base64-out /tmp/migrate.json && cat /tmp/migrate.json +``` + +The GitHub deploy role and bootstrap policy must allow `lambda:InvokeFunction` on `syncbot-*` functions; re-deploy the **bootstrap** stack if your policy predates that permission. + ### 3. 
GitHub Actions (AWS) Workflow: `.github/workflows/deploy-aws.yml` (runs on push to `test`/`prod` when not using GCP). @@ -270,7 +281,10 @@ See also [Sharing infrastructure across apps](#sharing-infrastructure-across-app ## Database schema (Alembic) -Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upgrade head`**. +Schema lives under `syncbot/db/alembic/`. **`alembic upgrade head`** runs: + +- **AWS (GitHub Actions):** After `sam deploy`, the workflow invokes the Lambda with `{"action":"migrate"}` (migrations + warm instance). Manual `sam deploy` from the guided script should be followed by the same invoke (see script post-deploy or run `aws lambda invoke` with that payload using stack output `SyncBotFunctionArn`). +- **Cloud Run / `python app.py`:** At process startup before the server listens. --- diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 216dabe..7ffca09 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -4,7 +4,7 @@ This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) m **Deploy entrypoint:** From the repo root, `./deploy.sh` (macOS/Linux, or Git Bash/WSL bash) or `.\deploy.ps1` (Windows PowerShell — finds Git Bash or WSL, then bash) runs an interactive helper that delegates to `infra/<provider>/scripts/deploy.sh`. After identity/auth prompts, each provider script shows a **Deploy Tasks** menu (comma-separated numbers, default all): bootstrap (AWS only), build/deploy, CI/CD (GitHub Actions), Slack API configuration, and DR backup secret output—so operators can run subsets (e.g. CI/CD only against an existing stack) without mid-flow surprises. That flow sets Cloud/Terraform resources and runtime env vars consistent with this document. Step-by-step and manual alternatives: [DEPLOYMENT.md](DEPLOYMENT.md). -**Schema:** The database schema is managed by **Alembic**. 
On startup the app runs **`alembic upgrade head`** so new and existing databases stay current with the latest migrations. +**Schema:** The database schema is managed by **Alembic** (`alembic upgrade head`). **AWS Lambda:** Migrations are **not** run on every cold start (that would exceed Slack’s interaction ack budget). The Lambda handler accepts a post-deploy invoke with payload `{"action":"migrate"}` to run migrations; the reference GitHub Actions deploy workflow invokes this after `sam deploy`. **Cloud Run / local / container:** Migrations still run at process startup before the HTTP server accepts traffic (no Slack ack on that path). ## Runtime Environment Variables @@ -93,7 +93,7 @@ The provider must deliver: **PostgreSQL / MySQL:** In non–local environments the app uses TLS by default; allow outbound TCP to the DB host (typically **5432** for PostgreSQL, **3306** for MySQL). **SQLite:** No network; the app uses a local file. Single-writer; ensure backups and file durability for production use. 4. **Keep-warm / scheduled ping (optional but recommended)** - To avoid cold-start latency, the app supports a periodic HTTP GET to a configurable path. The provider should support a scheduled job (e.g. CloudWatch Events, Cloud Scheduler) that hits the service on an interval (e.g. 5 minutes). + To avoid cold-start latency, the app supports a periodic HTTP GET to a configurable path. The provider should support a scheduled job (e.g. CloudWatch Events, Cloud Scheduler) that hits the service on an interval (e.g. 5 minutes). **AWS (SAM):** EventBridge Scheduler invokes the Lambda directly on a schedule; the Lambda handler returns a small JSON success for `source` `aws.scheduler` / `aws.events` without treating the payload as a Slack request. 5. **Stateless execution** The app is stateless; state lives in the configured database (PostgreSQL, MySQL, or SQLite). 
Horizontal scaling is supported with PostgreSQL/MySQL as long as all instances share the same DB and env; SQLite is single-writer. diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 0380d76..89ecf82 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -1716,6 +1716,21 @@ sam deploy \ APP_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" + FUNCTION_ARN="$(output_value "$APP_OUTPUTS" "SyncBotFunctionArn")" + if [[ -n "$FUNCTION_ARN" ]]; then + echo "=== Lambda migrate + warm-up ===" + TMP_MIGRATE="$(mktemp)" + aws lambda invoke \ + --function-name "$FUNCTION_ARN" \ + --payload '{"action":"migrate"}' \ + --cli-binary-format raw-in-base64-out \ + "$TMP_MIGRATE" \ + --region "$REGION" + cat "$TMP_MIGRATE" + echo + rm -f "$TMP_MIGRATE" + fi + else echo echo "Skipping Build/Deploy (task 2 not selected)." diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml index 5c19c82..b8110b4 100644 --- a/infra/aws/template.bootstrap.yaml +++ b/infra/aws/template.bootstrap.yaml @@ -164,6 +164,7 @@ Resources: - lambda:AddPermission - lambda:RemovePermission - lambda:PublishVersion + - lambda:InvokeFunction Resource: !Sub "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:syncbot-*" - Sid: ApiGateway Effect: Allow diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index dc57999..78ab1e2 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -645,7 +645,7 @@ Resources: Architectures: - x86_64 Timeout: 30 - MemorySize: 128 + MemorySize: 256 Policies: - AWSLambdaVPCAccessExecutionRole VpcConfig: !If diff --git a/syncbot/app.py b/syncbot/app.py index 14edfbf..c7f0169 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -84,7 +84,10 @@ def _redact_sensitive(obj, _depth=0): configure_logging() validate_config() -initialize_database() +# On Lambda, defer Alembic to a post-deploy invoke (see handler migrate branch) so cold +# starts stay under Slack's 3s ack budget. 
Cloud Run / local still run migrations here. +if not os.environ.get("AWS_LAMBDA_FUNCTION_NAME"): + initialize_database() app = App( process_before_response=not LOCAL_DEVELOPMENT, @@ -113,7 +116,25 @@ def handler(event: dict, context: dict) -> dict: Receives an API Gateway proxy event. Federation API paths (``/api/federation/*``) are handled directly; everything else is delegated to the Slack Bolt request handler. + + Also handles post-deploy ``{"action": "migrate"}`` (Alembic) and EventBridge + keep-warm invokes before Slack routing. """ + if event.get("action") == "migrate": + initialize_database() + return { + "statusCode": 200, + "headers": {"Content-Type": "application/json"}, + "body": json.dumps({"status": "ok", "action": "migrate"}), + } + + if event.get("source") in ("aws.scheduler", "aws.events"): + return { + "statusCode": 200, + "headers": {"Content-Type": "application/json"}, + "body": json.dumps({"status": "ok", "action": "warmup"}), + } + path = event.get("path", "") or event.get("rawPath", "") if path.startswith("/api/federation"): return _lambda_federation_handler(event) diff --git a/tests/test_app_main_response.py b/tests/test_app_main_response.py index 359379b..e39c4df 100644 --- a/tests/test_app_main_response.py +++ b/tests/test_app_main_response.py @@ -1,5 +1,6 @@ """Unit tests for syncbot.app.view_ack and main_response (ack + lazy work).""" +import json import os from unittest.mock import MagicMock, patch @@ -118,3 +119,36 @@ def handler(b, c, log, ctx): app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) ack.assert_not_called() + + +class TestLambdaHandler: + """AWS Lambda :func:`~app.handler` branches (migrate, warmup, Slack).""" + + def test_handler_migrate_event_calls_initialize_database(self): + with patch.object(app_module, "initialize_database") as mock_init: + result = app_module.handler({"action": "migrate"}, {}) + mock_init.assert_called_once() + assert result["statusCode"] == 200 + assert 
json.loads(result["body"]) == {"status": "ok", "action": "migrate"} + + def test_handler_warmup_scheduler_returns_ok(self): + with patch.object(app_module, "SlackRequestHandler") as mock_srh: + result = app_module.handler({"source": "aws.scheduler"}, {}) + mock_srh.assert_not_called() + assert result["statusCode"] == 200 + assert json.loads(result["body"]) == {"status": "ok", "action": "warmup"} + + def test_handler_warmup_events_returns_ok(self): + with patch.object(app_module, "SlackRequestHandler") as mock_srh: + result = app_module.handler({"source": "aws.events"}, {}) + mock_srh.assert_not_called() + assert result["statusCode"] == 200 + assert json.loads(result["body"])["action"] == "warmup" + + def test_handler_slack_event_delegates_to_bolt(self): + mock_handle = MagicMock(return_value={"statusCode": 200, "body": "{}"}) + with patch.object(app_module, "SlackRequestHandler") as mock_srh_class: + mock_srh_class.return_value.handle = mock_handle + app_module.handler({"httpMethod": "POST", "path": "/slack/events", "body": "{}"}, {}) + mock_srh_class.assert_called_once_with(app=app_module.app) + mock_handle.assert_called_once() From 7d69be8a7dca2b55023298c0753f1112d4061a72 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 11:14:45 -0500 Subject: [PATCH 12/19] Simplified CHANGELOG. 
--- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24aea94..b5cbcd1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,10 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Bumped GitHub Actions: `actions/checkout` v6, `actions/setup-python` v6, `actions/upload-artifact` v7, `actions/download-artifact` v8, `aws-actions/configure-aws-credentials` v6 - Dependabot: ignore semver-major updates for the Docker `python` image (keeps base image on Python 3.12.x line) -- AWS Lambda: Alembic migrations run via a post-deploy `{"action":"migrate"}` invoke (GitHub Actions after `sam deploy`) instead of on every cold start, keeping Slack interaction acks under the 3s budget; Cloud Run and local dev still run migrations at startup +- AWS Lambda: Alembic migrations now run via a post-deploy invoke instead of on every cold start, fixing Slack ack timeouts after deployment; Cloud Run and local dev unchanged - AWS Lambda memory increased from 128 MB to 256 MB for faster cold starts -- EventBridge keep-warm ScheduleV2 invokes return a clean JSON response from `app.handler` instead of falling through to the Slack Bolt handler -- AWS bootstrap IAM deploy policy: added `lambda:InvokeFunction` on `syncbot-*` functions so CI and the guided deploy script can run the post-deploy migrate invoke (re-sync the bootstrap stack to pick this up) +- EventBridge keep-warm invokes now return a clean JSON response instead of falling through to Slack Bolt +- AWS bootstrap deploy policy: added `lambda:InvokeFunction` -- **re-run the deploy script (Bootstrap task) or `aws cloudformation deploy` the bootstrap stack to pick up this permission** ### Fixed From d0374a125c25d538228eee3971aae28a1574be42 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 11:18:58 -0500 Subject: [PATCH 13/19] Minor poetry lock update. 
--- poetry.lock | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index c06148d..54d08ad 100644 --- a/poetry.lock +++ b/poetry.lock @@ -22,18 +22,18 @@ tz = ["tzdata"] [[package]] name = "boto3" -version = "1.42.76" +version = "1.42.78" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "boto3-1.42.76-py3-none-any.whl", hash = "sha256:63c6779c814847016b89ae1b72ed968f8a63d80e589ba337511aa6fc1b59585e"}, - {file = "boto3-1.42.76.tar.gz", hash = "sha256:aa2b1973eee8973a9475d24bb579b1dee7176595338d4e4f7880b5c6189b8814"}, + {file = "boto3-1.42.78-py3-none-any.whl", hash = "sha256:480a34a077484a5ca60124dfd150ba3ea6517fc89963a679e45b30c6db614d26"}, + {file = "boto3-1.42.78.tar.gz", hash = "sha256:cef2ebdb9be5c0e96822f8d3941ac4b816c90a5737a7ffb901d664c808964b63"}, ] [package.dependencies] -botocore = ">=1.42.76,<1.43.0" +botocore = ">=1.42.78,<1.43.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.16.0,<0.17.0" @@ -42,14 +42,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.42.76" +version = "1.42.78" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "botocore-1.42.76-py3-none-any.whl", hash = "sha256:151e714ae3c32f68ea0b4dc60751401e03f84a87c6cf864ea0ee64aa10eb4607"}, - {file = "botocore-1.42.76.tar.gz", hash = "sha256:c553fa0ae29e36a5c407f74da78b78404b81b74b15fb62bf640a3cd9385f0874"}, + {file = "botocore-1.42.78-py3-none-any.whl", hash = "sha256:038ab63c7f898e8b5db58cb6a45e4da56c31dd984e7e995839a3540c735564ea"}, + {file = "botocore-1.42.78.tar.gz", hash = "sha256:61cbd49728e23f68cfd945406ab40044d49abed143362f7ffa4a4f4bd4311791"}, ] [package.dependencies] From 7abfa2a412bce04b9a6a33f4f26a8bfec43d566a Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 11:20:39 -0500 Subject: [PATCH 14/19] Added notes about naming conventions to CONTRIBUTING. --- CONTRIBUTING.md | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 426d16e..2c81c75 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing -Thanks for helping improve SyncBot. +Thanks for helping to improve SyncBot! ## Branching (upstream vs downstream) @@ -11,7 +11,27 @@ The **upstream** repository ([F3Nation-Community/syncbot](https://github.com/F3N | **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | | **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)). | -Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. +Typical flow: develop a fix or new feature on a branch in your repo → test and deploy to your infra → open a PR to **`upstream/main`**. 
+ +### Branch Naming Conventions + +Format: `<type>/<description>` or `<type>/<issue-number>-<description>` + +Types: + +- feature/ New functionality +- bugfix/ Bug fixes for existing features +- hotfix/ Urgent production issues +- refactor/ Code improvements without behavior changes +- docs/ Documentation only changes +- chore/ Build process, dependency updates, etc. + +Rules: + +- Use lowercase +- Separate words with hyphens +- Keep descriptions under 50 characters +- Be specific: feature/user-auth not feature/auth ## Workflow From a0f32de4e3c63a80ca89a79d825fbb8dd8708506 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 13:29:58 -0500 Subject: [PATCH 15/19] Improved deploy script when vars conflict between local and cloud. --- infra/aws/scripts/deploy.sh | 155 ++++++++++++++++++++++++++---------- 1 file changed, 113 insertions(+), 42 deletions(-) diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 1f3c6b6..7b5192c 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -97,6 +97,92 @@ required_from_env_or_prompt() { fi } +# When local env overrides differ from the CloudFormation stack (e.g. GitHub-deployed TiDB vs .env RDS), +# prompt the operator instead of silently preferring env. +resolve_with_conflict_check() { + local label="$1" + local env_value="$2" + local stack_value="$3" + local prompt_default_value="$4" + local mode="${5:-plain}" # plain|secret|bool + + if [[ -z "$env_value" ]]; then + if [[ "$mode" == "secret" ]]; then + prompt_secret_required "$label" + elif [[ "$mode" == "bool" ]]; then + local yn_def="${prompt_default_value:-y}" + if prompt_yes_no "$label" "$yn_def"; then + echo "true" + else + echo "false" + fi + else + prompt_default "$label" "$prompt_default_value" + fi + return 0 + fi + + if [[ "$mode" == "bool" ]]; then + if [[ "$env_value" != "true" && "$env_value" != "false" ]]; then + echo "Error: environment value for $label must be true or false." 
>&2 + exit 1 + fi + if [[ -z "$stack_value" || "$env_value" == "$stack_value" ]]; then + echo "Using $label from environment variable." >&2 + echo "$env_value" + return 0 + fi + echo "" >&2 + echo "CONFLICT: $label differs between local env and deployed stack:" >&2 + echo " Local env: $env_value" >&2 + echo " AWS stack: $stack_value" >&2 + local choice + read -r -p "Use (l)ocal env / (a)ws stack / (e)nter new value? [l/a/e]: " choice >&2 + case "$choice" in + a | A) echo "$stack_value" ;; + e | E) + if prompt_yes_no "$label" "${prompt_default_value:-y}"; then + echo "true" + else + echo "false" + fi + ;; + *) echo "$env_value" ;; + esac + return 0 + fi + + if [[ -z "$stack_value" || "$env_value" == "$stack_value" ]]; then + echo "Using $label from environment variable." >&2 + echo "$env_value" + return 0 + fi + + local display_env="$env_value" + local display_stack="$stack_value" + if [[ "$mode" == "secret" ]]; then + display_env="(hidden)" + display_stack="(hidden)" + fi + echo "" >&2 + echo "CONFLICT: $label differs between local env and deployed stack:" >&2 + echo " Local env: $display_env" >&2 + echo " AWS stack: $display_stack" >&2 + local choice + read -r -p "Use (l)ocal env / (a)ws stack / (e)nter new value? [l/a/e]: " choice >&2 + case "$choice" in + a | A) echo "$stack_value" ;; + e | E) + if [[ "$mode" == "secret" ]]; then + prompt_secret_required "$label" + else + prompt_required "$label" + fi + ;; + *) echo "$env_value" ;; + esac +} + prompt_yes_no() { local prompt="$1" local default="${2:-y}" @@ -1411,12 +1497,11 @@ if [[ "$DB_MODE" == "2" ]]; then EXISTING_DATABASE_ADMIN_USER_DEFAULT="admin" [[ -n "$PREV_EXISTING_DATABASE_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$PREV_EXISTING_DATABASE_ADMIN_USER" - if [[ -n "$ENV_EXISTING_DATABASE_HOST" ]]; then - echo "Using ExistingDatabaseHost from environment variable EXISTING_DATABASE_HOST." 
- EXISTING_DATABASE_HOST="$ENV_EXISTING_DATABASE_HOST" - else - EXISTING_DATABASE_HOST="$(prompt_default "ExistingDatabaseHost (RDS endpoint hostname)" "$EXISTING_DATABASE_HOST_DEFAULT")" - fi + EXISTING_DATABASE_HOST="$(resolve_with_conflict_check \ + "ExistingDatabaseHost (RDS endpoint hostname)" \ + "$ENV_EXISTING_DATABASE_HOST" \ + "$PREV_EXISTING_DATABASE_HOST" \ + "$EXISTING_DATABASE_HOST_DEFAULT")" DETECTED_ADMIN_USER="" DETECTED_ADMIN_SECRET_ARN="" @@ -1432,12 +1517,11 @@ if [[ "$DB_MODE" == "2" ]]; then if [[ -z "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" || "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" == "admin" ]]; then [[ -n "$DETECTED_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$DETECTED_ADMIN_USER" fi - if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_USER" ]]; then - echo "Using ExistingDatabaseAdminUser from environment variable EXISTING_DATABASE_ADMIN_USER." - EXISTING_DATABASE_ADMIN_USER="$ENV_EXISTING_DATABASE_ADMIN_USER" - else - EXISTING_DATABASE_ADMIN_USER="$(prompt_default "ExistingDatabaseAdminUser" "$EXISTING_DATABASE_ADMIN_USER_DEFAULT")" - fi + EXISTING_DATABASE_ADMIN_USER="$(resolve_with_conflict_check \ + "ExistingDatabaseAdminUser" \ + "$ENV_EXISTING_DATABASE_ADMIN_USER" \ + "$PREV_EXISTING_DATABASE_ADMIN_USER" \ + "$EXISTING_DATABASE_ADMIN_USER_DEFAULT")" if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then echo "Using ExistingDatabaseAdminPassword from environment variable EXISTING_DATABASE_ADMIN_PASSWORD." @@ -1465,12 +1549,11 @@ if [[ "$DB_MODE" == "2" ]]; then echo "Leave port blank to use the engine default (3306 MySQL, 5432 PostgreSQL)." DEFAULT_EXISTING_DB_PORT="" [[ -n "$PREV_EXISTING_DATABASE_PORT" ]] && DEFAULT_EXISTING_DB_PORT="$PREV_EXISTING_DATABASE_PORT" - if [[ -n "$ENV_EXISTING_DATABASE_PORT" ]]; then - echo "Using ExistingDatabasePort from environment variable EXISTING_DATABASE_PORT." 
- EXISTING_DATABASE_PORT="$ENV_EXISTING_DATABASE_PORT" - else - EXISTING_DATABASE_PORT="$(prompt_default "ExistingDatabasePort (optional)" "$DEFAULT_EXISTING_DB_PORT")" - fi + EXISTING_DATABASE_PORT="$(resolve_with_conflict_check \ + "ExistingDatabasePort (optional)" \ + "$ENV_EXISTING_DATABASE_PORT" \ + "$PREV_EXISTING_DATABASE_PORT" \ + "$DEFAULT_EXISTING_DB_PORT")" if [[ "$DATABASE_ENGINE" == "mysql" && "$EXISTING_DATABASE_PORT" == "3306" ]]; then EXISTING_DATABASE_PORT="" fi @@ -1483,33 +1566,21 @@ if [[ "$DB_MODE" == "2" ]]; then CREATE_APP_DEFAULT="y" [[ "${PREV_EXISTING_DATABASE_CREATE_APP_USER:-}" == "false" ]] && CREATE_APP_DEFAULT="n" - if [[ -n "$ENV_EXISTING_DATABASE_CREATE_APP_USER" ]]; then - echo "Using ExistingDatabaseCreateAppUser from environment variable EXISTING_DATABASE_CREATE_APP_USER." - EXISTING_DATABASE_CREATE_APP_USER="$ENV_EXISTING_DATABASE_CREATE_APP_USER" - if [[ "$EXISTING_DATABASE_CREATE_APP_USER" != "true" && "$EXISTING_DATABASE_CREATE_APP_USER" != "false" ]]; then - echo "Error: EXISTING_DATABASE_CREATE_APP_USER must be true or false." >&2 - exit 1 - fi - elif prompt_yes_no "Create dedicated app DB user (CREATE USER / grants)?" "$CREATE_APP_DEFAULT"; then - EXISTING_DATABASE_CREATE_APP_USER="true" - else - EXISTING_DATABASE_CREATE_APP_USER="false" - fi + EXISTING_DATABASE_CREATE_APP_USER="$(resolve_with_conflict_check \ + "Create dedicated app DB user (CREATE USER / grants)?" \ + "$ENV_EXISTING_DATABASE_CREATE_APP_USER" \ + "${PREV_EXISTING_DATABASE_CREATE_APP_USER:-}" \ + "$CREATE_APP_DEFAULT" \ + bool)" CREATE_SCHEMA_DEFAULT="y" [[ "${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-}" == "false" ]] && CREATE_SCHEMA_DEFAULT="n" - if [[ -n "$ENV_EXISTING_DATABASE_CREATE_SCHEMA" ]]; then - echo "Using ExistingDatabaseCreateSchema from environment variable EXISTING_DATABASE_CREATE_SCHEMA." 
-        EXISTING_DATABASE_CREATE_SCHEMA="$ENV_EXISTING_DATABASE_CREATE_SCHEMA"
-        if [[ "$EXISTING_DATABASE_CREATE_SCHEMA" != "true" && "$EXISTING_DATABASE_CREATE_SCHEMA" != "false" ]]; then
-            echo "Error: EXISTING_DATABASE_CREATE_SCHEMA must be true or false." >&2
-            exit 1
-        fi
-    elif prompt_yes_no "Run CREATE DATABASE IF NOT EXISTS for DatabaseSchema?" "$CREATE_SCHEMA_DEFAULT"; then
-        EXISTING_DATABASE_CREATE_SCHEMA="true"
-    else
-        EXISTING_DATABASE_CREATE_SCHEMA="false"
-    fi
+    EXISTING_DATABASE_CREATE_SCHEMA="$(resolve_with_conflict_check \
+        "Run CREATE DATABASE IF NOT EXISTS for DatabaseSchema?" \
+        "$ENV_EXISTING_DATABASE_CREATE_SCHEMA" \
+        "${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-}" \
+        "$CREATE_SCHEMA_DEFAULT" \
+        bool)"
 
     if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then
         echo "Error: valid ExistingDatabaseHost is required for existing DB mode." >&2
         exit 1

From bab1e497009274668668e5d4fddd5376fe005b7f Mon Sep 17 00:00:00 2001
From: Klint Van Tassel
Date: Sat, 28 Mar 2026 13:59:19 -0500
Subject: [PATCH 16/19] Adjusted app DB user creation to better accommodate other DB providers like TiDB.
--- .github/workflows/deploy-aws.yml | 2 ++ CHANGELOG.md | 1 + docs/DEPLOYMENT.md | 3 ++- docs/INFRA_CONTRACT.md | 2 +- infra/aws/db_setup/handler.py | 14 +++++++++++--- infra/aws/scripts/deploy.sh | 27 ++++++++++++++++++++++++++- infra/aws/template.yaml | 12 ++++++++++++ infra/gcp/README.md | 1 + infra/gcp/main.tf | 5 ++++- infra/gcp/scripts/deploy.sh | 13 ++++++++++--- infra/gcp/variables.tf | 8 +++++++- tests/test_db_setup.py | 25 +++++++++++++++++++++++++ 12 files changed, 102 insertions(+), 11 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index bcebfed..0512c10 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -106,6 +106,7 @@ jobs: ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ + ExistingDatabaseAppUsernamePrefix=${{ vars.EXISTING_DATABASE_APP_USERNAME_PREFIX }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ @@ -171,6 +172,7 @@ jobs: ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ + ExistingDatabaseAppUsernamePrefix=${{ vars.EXISTING_DATABASE_APP_USERNAME_PREFIX }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ diff --git a/CHANGELOG.md b/CHANGELOG.md index 6229a9f..13f7cd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - `ExistingDatabasePort`, `ExistingDatabaseCreateAppUser`, and `ExistingDatabaseCreateSchema` deploy 
parameters for external DB providers (e.g. TiDB Cloud) +- `ExistingDatabaseAppUsernamePrefix` (AWS) / `existing_db_app_username_prefix` (GCP): optional prefix so the dedicated app user is `{prefix}syncbot_user_{stage}` (required for TiDB Cloud Serverless username rules) ## [1.0.1] - 2026-03-26 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index ced1b93..ce93b23 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -48,7 +48,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). 4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. 5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. -6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). 
**New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseAppUsernamePrefix** (e.g. TiDB Cloud cluster prefix including trailing dot; app user becomes `{prefix}syncbot_user_{stage}`), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. 7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` @@ -178,6 +178,7 @@ Configure **per-environment** (`test` / `prod`) variables and secrets so they ma | Var | `EXISTING_DATABASE_PORT` | Optional; non-standard TCP port (e.g. `4000`). Empty = engine default in SAM. 
| | Var | `EXISTING_DATABASE_CREATE_APP_USER` | `true` / `false` (default `true`). Set `false` when the DB cannot create a dedicated app user. | | Var | `EXISTING_DATABASE_CREATE_SCHEMA` | `true` / `false` (default `true`). Set `false` when the database/schema already exists. | +| Var | `EXISTING_DATABASE_APP_USERNAME_PREFIX` | Optional. Provider-specific username prefix (e.g. TiDB Cloud `42bvZAUSurKwhxc.`). Empty for RDS/standard MySQL. | | Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | | Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | | Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 5812402..7674327 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -28,7 +28,7 @@ The application reads configuration from environment variables. Providers must i | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. Set explicitly for external providers that use a non-standard port (e.g. TiDB Cloud **4000**). | -| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseAppUsernamePrefix`** so the bootstrap Lambda can create `{prefix}syncbot_user_{stage}`. 
On GCP with **`existing_db_app_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. | | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | | `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index 16f380c..8f8e108 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -78,6 +78,13 @@ def _safe_ident(name: str) -> str: return name +def _safe_username(name: str) -> str: + """Validate a database username. Allows dots for provider prefixes (e.g. TiDB Cloud).""" + if not re.match(r"^[A-Za-z0-9_][A-Za-z0-9_.]*$", name): + raise ValueError(f"Invalid username: {name}") + return name + + def _default_port(database_engine: str) -> int: return 3306 if database_engine == "mysql" else 5432 @@ -147,7 +154,8 @@ def _handler_impl(event, context): ) return - app_username = f"syncbot_user_{stage}".replace("-", "_") + app_username_prefix = (props.get("AppUsernamePrefix") or "").strip() + app_username = f"{app_username_prefix}syncbot_user_{stage}".replace("-", "_") app_password = "" if create_app_user: try: @@ -268,7 +276,7 @@ def setup_database_mysql( ) -> None: safe_schema = _safe_ident(schema) if create_app_user: - _safe_ident(app_username) + _safe_username(app_username) conn = None last_exc = None for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): @@ -323,7 +331,7 @@ def setup_database_postgresql( db_connect_retry_seconds = POSTGRES_DB_CONNECT_RETRY_SECONDS _safe_ident(schema) if create_app_user: - _safe_ident(app_username) + _safe_username(app_username) _safe_ident(admin_user) conn = psycopg2.connect( diff --git 
a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 7b5192c..94c21c7 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -420,6 +420,7 @@ configure_github_actions_aws() { # $15 Existing DB port (empty = engine default in SAM) # $16 ExistingDatabaseCreateAppUser: true | false # $17 ExistingDatabaseCreateSchema: true | false + # $18 ExistingDatabaseAppUsernamePrefix (e.g. TiDB cluster prefix; empty for RDS) local bootstrap_outputs="$1" local bootstrap_stack_name="$2" local aws_region="$3" @@ -439,6 +440,7 @@ configure_github_actions_aws() { local existing_db_port="${15:-}" local existing_db_create_app_user="${16:-true}" local existing_db_create_schema="${17:-true}" + local existing_db_app_username_prefix="${18:-}" [[ -z "$existing_db_create_app_user" ]] && existing_db_create_app_user="true" [[ -z "$existing_db_create_schema" ]] && existing_db_create_schema="true" local role bucket boot_region @@ -500,6 +502,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "$existing_db_port" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "$existing_db_create_app_user" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "$existing_db_create_schema" -R "$repo" + gh variable set EXISTING_DATABASE_APP_USERNAME_PREFIX --env "$env_name" --body "$existing_db_app_username_prefix" -R "$repo" else # Clear existing-host vars for new-RDS mode to avoid stale CI config. 
gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" @@ -510,6 +513,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "true" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "true" -R "$repo" + gh variable set EXISTING_DATABASE_APP_USERNAME_PREFIX --env "$env_name" --body "" -R "$repo" fi echo "Environment variables updated for '$env_name'." fi @@ -1292,6 +1296,7 @@ PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" PREV_EXISTING_DATABASE_PORT="" PREV_EXISTING_DATABASE_CREATE_APP_USER="" PREV_EXISTING_DATABASE_CREATE_SCHEMA="" +PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX="" PREV_DATABASE_ENGINE="" PREV_DATABASE_SCHEMA="" PREV_LOG_LEVEL="" @@ -1323,6 +1328,7 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_EXISTING_DATABASE_PORT="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabasePort")" PREV_EXISTING_DATABASE_CREATE_APP_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateAppUser")" PREV_EXISTING_DATABASE_CREATE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateSchema")" + PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseAppUsernamePrefix")" PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" @@ -1472,6 +1478,7 @@ ENV_EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" ENV_EXISTING_DATABASE_PORT="${EXISTING_DATABASE_PORT:-}" ENV_EXISTING_DATABASE_CREATE_APP_USER="${EXISTING_DATABASE_CREATE_APP_USER:-}" ENV_EXISTING_DATABASE_CREATE_SCHEMA="${EXISTING_DATABASE_CREATE_SCHEMA:-}" 
+ENV_EXISTING_DATABASE_APP_USERNAME_PREFIX="${EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" EXISTING_DATABASE_HOST="" EXISTING_DATABASE_ADMIN_USER="" @@ -1482,6 +1489,7 @@ EXISTING_DATABASE_LAMBDA_SG_ID="" EXISTING_DATABASE_PORT="" EXISTING_DATABASE_CREATE_APP_USER="true" EXISTING_DATABASE_CREATE_SCHEMA="true" +EXISTING_DATABASE_APP_USERNAME_PREFIX="" EXISTING_DB_EFFECTIVE_PORT="" DATABASE_SCHEMA="" DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" @@ -1582,6 +1590,14 @@ if [[ "$DB_MODE" == "2" ]]; then "$CREATE_SCHEMA_DEFAULT" \ bool)" + EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="" + [[ -n "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" ]] && EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" + EXISTING_DATABASE_APP_USERNAME_PREFIX="$(resolve_with_conflict_check \ + "App username prefix (e.g. abc123. for TiDB Cloud; blank for RDS/standard)" \ + "$ENV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ + "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ + "$EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT")" + if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then echo "Error: valid ExistingDatabaseHost is required for existing DB mode." 
>&2 exit 1 @@ -1760,6 +1776,11 @@ if [[ "$DB_MODE" == "2" ]]; then echo "DB port: ${EXISTING_DB_EFFECTIVE_PORT:-engine default}" echo "DB create user: $EXISTING_DATABASE_CREATE_APP_USER" echo "DB create schema: $EXISTING_DATABASE_CREATE_SCHEMA" + if [[ -n "$EXISTING_DATABASE_APP_USERNAME_PREFIX" ]]; then + echo "DB app user prefix: $EXISTING_DATABASE_APP_USERNAME_PREFIX (app user: ${EXISTING_DATABASE_APP_USERNAME_PREFIX}syncbot_user_${STAGE//-/_})" + else + echo "DB app user prefix: (none — app user: syncbot_user_${STAGE//-/_})" + fi echo "DB schema: $DATABASE_SCHEMA" else echo "DB mode: create new RDS" @@ -1839,6 +1860,7 @@ if [[ "$DB_MODE" == "2" ]]; then PARAMS+=( "ExistingDatabaseCreateAppUser=$EXISTING_DATABASE_CREATE_APP_USER" "ExistingDatabaseCreateSchema=$EXISTING_DATABASE_CREATE_SCHEMA" + "ExistingDatabaseAppUsernamePrefix=$EXISTING_DATABASE_APP_USERNAME_PREFIX" ) else # Clear existing-host parameters on updates to avoid stale previous values. @@ -1853,6 +1875,7 @@ else "ParameterKey=ExistingDatabasePort,ParameterValue=" "ExistingDatabaseCreateAppUser=true" "ExistingDatabaseCreateSchema=true" + "ParameterKey=ExistingDatabaseAppUsernamePrefix,ParameterValue=" ) fi @@ -1900,6 +1923,7 @@ else EXISTING_DATABASE_PORT="${PREV_EXISTING_DATABASE_PORT:-}" EXISTING_DATABASE_CREATE_APP_USER="${PREV_EXISTING_DATABASE_CREATE_APP_USER:-true}" EXISTING_DATABASE_CREATE_SCHEMA="${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-true}" + EXISTING_DATABASE_APP_USERNAME_PREFIX="${PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" [[ -z "$EXISTING_DATABASE_CREATE_APP_USER" ]] && EXISTING_DATABASE_CREATE_APP_USER="true" [[ -z "$EXISTING_DATABASE_CREATE_SCHEMA" ]] && EXISTING_DATABASE_CREATE_SCHEMA="true" SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" @@ -1951,7 +1975,8 @@ if [[ "$TASK_CICD" == "true" ]]; then "$DATABASE_ENGINE" \ "${EXISTING_DATABASE_PORT:-}" \ "${EXISTING_DATABASE_CREATE_APP_USER:-true}" \ - "${EXISTING_DATABASE_CREATE_SCHEMA:-true}" + 
"${EXISTING_DATABASE_CREATE_SCHEMA:-true}" \ + "${EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" fi if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 43555cf..194a0e4 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -153,6 +153,14 @@ Parameters: - "true" - "false" + ExistingDatabaseAppUsernamePrefix: + Description: > + Username prefix required by some managed DB providers (e.g. TiDB Cloud cluster prefix "abc123."). + Include the trailing dot/separator. The app username becomes {prefix}syncbot_user_{Stage}. + Leave empty for standard databases (RDS, self-hosted). Ignored when creating new RDS in stack. + Type: String + Default: "" + DatabaseSchema: Description: > Database/schema name for MySQL or PostgreSQL. Each app sharing an RDS instance @@ -673,6 +681,10 @@ Resources: - UseExistingDatabase - !Ref ExistingDatabaseCreateSchema - "true" + AppUsernamePrefix: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAppUsernamePrefix + - "" # ============================================================ # Lambda Function diff --git a/infra/gcp/README.md b/infra/gcp/README.md index 0d37b14..30d5fe1 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -48,6 +48,7 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infras | `stage` | Stage name, e.g. `test` or `prod` | | `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | | `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | +| `existing_db_app_username_prefix` | Optional (e.g. TiDB Cloud `abc123.`). 
When set, `DATABASE_USER` is `{prefix}syncbot_user_{stage}` and `existing_db_user` is ignored | | `cloud_run_image` | Container image URL for Cloud Run (set after first build) | | `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). | | `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. | diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 950da62..ac47566 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -44,7 +44,10 @@ locals { length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "" ) db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" - db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" + stage_syncbot_user = "syncbot_user_${replace(var.stage, "-", "_")}" + db_user = var.use_existing_database ? ( + trimspace(var.existing_db_app_username_prefix) != "" ? "${var.existing_db_app_username_prefix}${local.stage_syncbot_user}" : var.existing_db_user + ) : "syncbot_app" # Non-secret Cloud Run env (see docs/INFRA_CONTRACT.md) syncbot_public_url_effective = trimspace(var.syncbot_public_url_override) != "" ? 
trimspace(var.syncbot_public_url_override) : "" diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 3ed82ad..331cd23 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -605,13 +605,18 @@ fi if [[ "$USE_EXISTING" == "true" ]]; then EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" - EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" + EXISTING_DB_APP_USERNAME_PREFIX="$(prompt_line "App username prefix (optional; e.g. TiDB Cloud abc123.; blank = enter full DB user next)" "")" + if [[ -n "$EXISTING_DB_APP_USERNAME_PREFIX" ]]; then + EXISTING_USER="" + else + EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" + fi if [[ -z "$EXISTING_HOST" ]]; then echo "Error: Existing DB host is required when using existing database mode." >&2 exit 1 fi - if [[ -z "$EXISTING_USER" ]]; then - echo "Error: Database user is required when using existing database mode." >&2 + if [[ -z "$EXISTING_USER" && -z "$EXISTING_DB_APP_USERNAME_PREFIX" ]]; then + echo "Error: Database user or app username prefix is required when using existing database mode." 
>&2 exit 1 fi @@ -756,10 +761,12 @@ if [[ "$USE_EXISTING" == "true" ]]; then VARS+=("-var=existing_db_host=$EXISTING_HOST") VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") VARS+=("-var=existing_db_user=$EXISTING_USER") + VARS+=("-var=existing_db_app_username_prefix=$EXISTING_DB_APP_USERNAME_PREFIX") VARS+=("-var=existing_db_create_app_user=$EXISTING_DB_CREATE_APP_USER") VARS+=("-var=existing_db_create_schema=$EXISTING_DB_CREATE_SCHEMA") else VARS+=("-var=use_existing_database=false") + VARS+=("-var=existing_db_app_username_prefix=") fi VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index eca6786..6487207 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -45,7 +45,13 @@ variable "existing_db_schema" { variable "existing_db_user" { type = string default = "" - description = "Existing MySQL user (when use_existing_database = true)" + description = "Existing MySQL user (when use_existing_database = true). Ignored when existing_db_app_username_prefix is set (DATABASE_USER becomes prefix + syncbot_user_{stage})." +} + +variable "existing_db_app_username_prefix" { + type = string + default = "" + description = "Optional prefix for app DB username (e.g. TiDB Cloud cluster prefix \"abc123.\"). When non-empty, DATABASE_USER is {prefix}syncbot_user_{stage} and existing_db_user is ignored." 
} variable "existing_db_create_app_user" { diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index 2db15a2..9cc74cc 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -94,6 +94,31 @@ def test_handler_calls_postgresql_setup(cfn_create_event): assert mock_send.call_args[0][2] == "SUCCESS" +def test_safe_username_accepts_dotted_prefix(): + handler = _fresh_handler() + handler._safe_username("42bvZAUSurKwhxc.syncbot_user_test") + + +def test_safe_ident_rejects_dots(): + handler = _fresh_handler() + with pytest.raises(ValueError, match="Invalid identifier"): + handler._safe_ident("bad.schema") + + +def test_handler_app_username_prefix_passed_to_mysql(cfn_create_event): + cfn_create_event["ResourceProperties"]["AppUsernamePrefix"] = "pre." + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + + def test_handler_custom_port_passed_to_tcp_and_mysql(cfn_create_event): cfn_create_event["ResourceProperties"]["Port"] = "4000" handler = _fresh_handler() From 1196b5bee0142db8cf37d80316b465f824082527 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 14:09:27 -0500 Subject: [PATCH 17/19] Changed to automatically put in dot separator. 
--- docs/DEPLOYMENT.md | 2 +- docs/INFRA_CONTRACT.md | 2 +- infra/aws/db_setup/handler.py | 2 ++ infra/aws/scripts/deploy.sh | 2 +- infra/aws/template.yaml | 4 ++-- infra/gcp/README.md | 2 +- infra/gcp/main.tf | 7 ++++++- infra/gcp/scripts/deploy.sh | 2 +- infra/gcp/variables.tf | 2 +- tests/test_db_setup.py | 16 +++++++++++++++- 10 files changed, 31 insertions(+), 10 deletions(-) diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index ce93b23..7a73612 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -48,7 +48,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). 4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. 5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. -6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseAppUsernamePrefix** (e.g. 
TiDB Cloud cluster prefix including trailing dot; app user becomes `{prefix}syncbot_user_{stage}`), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseAppUsernamePrefix** (e.g. TiDB Cloud cluster prefix `abc123`; a dot separator is added automatically; app user becomes `{prefix}.syncbot_user_{stage}`), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. 7. 
**Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 7674327..1895fdf 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -28,7 +28,7 @@ The application reads configuration from environment variables. Providers must i | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. Set explicitly for external providers that use a non-standard port (e.g. TiDB Cloud **4000**). | -| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseAppUsernamePrefix`** so the bootstrap Lambda can create `{prefix}syncbot_user_{stage}`. On GCP with **`existing_db_app_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseAppUsernamePrefix`** so the bootstrap Lambda can create `{prefix}.syncbot_user_{stage}` (a dot separator is added automatically). On GCP with **`existing_db_app_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. 
| | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | | `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index 8f8e108..eb9d57b 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -155,6 +155,8 @@ def _handler_impl(event, context): return app_username_prefix = (props.get("AppUsernamePrefix") or "").strip() + if app_username_prefix and not app_username_prefix.endswith("."): + app_username_prefix += "." app_username = f"{app_username_prefix}syncbot_user_{stage}".replace("-", "_") app_password = "" if create_app_user: diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 94c21c7..bca2595 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -1593,7 +1593,7 @@ if [[ "$DB_MODE" == "2" ]]; then EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="" [[ -n "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" ]] && EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" EXISTING_DATABASE_APP_USERNAME_PREFIX="$(resolve_with_conflict_check \ - "App username prefix (e.g. abc123. for TiDB Cloud; blank for RDS/standard)" \ + "App username prefix (e.g. 
abc123 for TiDB Cloud; blank for RDS/standard)" \ "$ENV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ "$EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT")" diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 194a0e4..3936a51 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -155,8 +155,8 @@ Parameters: ExistingDatabaseAppUsernamePrefix: Description: > - Username prefix required by some managed DB providers (e.g. TiDB Cloud cluster prefix "abc123."). - Include the trailing dot/separator. The app username becomes {prefix}syncbot_user_{Stage}. + Username prefix required by some managed DB providers (e.g. TiDB Cloud cluster prefix "abc123"). + A dot separator is added automatically if missing. The app username becomes {prefix}.syncbot_user_{Stage}. Leave empty for standard databases (RDS, self-hosted). Ignored when creating new RDS in stack. Type: String Default: "" diff --git a/infra/gcp/README.md b/infra/gcp/README.md index 30d5fe1..d6d7ff1 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -48,7 +48,7 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infras | `stage` | Stage name, e.g. `test` or `prod` | | `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | | `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | -| `existing_db_app_username_prefix` | Optional (e.g. TiDB Cloud `abc123.`). When set, `DATABASE_USER` is `{prefix}syncbot_user_{stage}` and `existing_db_user` is ignored | +| `existing_db_app_username_prefix` | Optional (e.g. TiDB Cloud `abc123`). A dot separator is added automatically. 
When set, `DATABASE_USER` is `{prefix}.syncbot_user_{stage}` and `existing_db_user` is ignored | | `cloud_run_image` | Container image URL for Cloud Run (set after first build) | | `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). | | `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. | diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index ac47566..1452418 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -45,8 +45,13 @@ locals { ) db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" stage_syncbot_user = "syncbot_user_${replace(var.stage, "-", "_")}" + normalized_prefix = ( + trimspace(var.existing_db_app_username_prefix) != "" + ? (endswith(trimspace(var.existing_db_app_username_prefix), ".") ? trimspace(var.existing_db_app_username_prefix) : "${trimspace(var.existing_db_app_username_prefix)}.") + : "" + ) db_user = var.use_existing_database ? ( - trimspace(var.existing_db_app_username_prefix) != "" ? "${var.existing_db_app_username_prefix}${local.stage_syncbot_user}" : var.existing_db_user + local.normalized_prefix != "" ? 
"${local.normalized_prefix}${local.stage_syncbot_user}" : var.existing_db_user ) : "syncbot_app" # Non-secret Cloud Run env (see docs/INFRA_CONTRACT.md) diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 331cd23..10f6ba4 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -605,7 +605,7 @@ fi if [[ "$USE_EXISTING" == "true" ]]; then EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" - EXISTING_DB_APP_USERNAME_PREFIX="$(prompt_line "App username prefix (optional; e.g. TiDB Cloud abc123.; blank = enter full DB user next)" "")" + EXISTING_DB_APP_USERNAME_PREFIX="$(prompt_line "App username prefix (optional; e.g. TiDB Cloud abc123; blank = enter full DB user next)" "")" if [[ -n "$EXISTING_DB_APP_USERNAME_PREFIX" ]]; then EXISTING_USER="" else diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 6487207..88c83a8 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -51,7 +51,7 @@ variable "existing_db_user" { variable "existing_db_app_username_prefix" { type = string default = "" - description = "Optional prefix for app DB username (e.g. TiDB Cloud cluster prefix \"abc123.\"). When non-empty, DATABASE_USER is {prefix}syncbot_user_{stage} and existing_db_user is ignored." + description = "Optional prefix for app DB username (e.g. TiDB Cloud cluster prefix \"abc123\"). A dot separator is added automatically. When non-empty, DATABASE_USER is {prefix}.syncbot_user_{stage} and existing_db_user is ignored." 
} variable "existing_db_create_app_user" { diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index 9cc74cc..aee319c 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -105,7 +105,7 @@ def test_safe_ident_rejects_dots(): handler._safe_ident("bad.schema") -def test_handler_app_username_prefix_passed_to_mysql(cfn_create_event): +def test_handler_app_username_prefix_with_dot(cfn_create_event): cfn_create_event["ResourceProperties"]["AppUsernamePrefix"] = "pre." handler = _fresh_handler() with ( @@ -119,6 +119,20 @@ def test_handler_app_username_prefix_passed_to_mysql(cfn_create_event): assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" +def test_handler_app_username_prefix_without_dot(cfn_create_event): + cfn_create_event["ResourceProperties"]["AppUsernamePrefix"] = "pre" + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + + def test_handler_custom_port_passed_to_tcp_and_mysql(cfn_create_event): cfn_create_event["ResourceProperties"]["Port"] = "4000" handler = _fresh_handler() From eb26d70fd27e8f21312fbefa245639fbf1716e30 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 14:38:54 -0500 Subject: [PATCH 18/19] Changing env var to EXISTING_DATABASE_USERNAME_PREFIX. 
--- .github/workflows/deploy-aws.yml | 4 +-- CHANGELOG.md | 2 +- docs/DEPLOYMENT.md | 4 +-- docs/INFRA_CONTRACT.md | 2 +- infra/aws/db_setup/handler.py | 12 ++++---- infra/aws/scripts/deploy.sh | 51 ++++++++++++++++++-------------- infra/aws/template.yaml | 10 ++++--- infra/gcp/README.md | 2 +- infra/gcp/main.tf | 4 +-- infra/gcp/scripts/deploy.sh | 12 ++++---- infra/gcp/variables.tf | 6 ++-- tests/test_db_setup.py | 26 +++++++++++++--- 12 files changed, 82 insertions(+), 53 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 0512c10..6b496d1 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -106,7 +106,7 @@ jobs: ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ - ExistingDatabaseAppUsernamePrefix=${{ vars.EXISTING_DATABASE_APP_USERNAME_PREFIX }} \ + ExistingDatabaseUsernamePrefix=${{ vars.EXISTING_DATABASE_USERNAME_PREFIX }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ @@ -172,7 +172,7 @@ jobs: ExistingDatabasePort=${{ vars.EXISTING_DATABASE_PORT }} \ ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ - ExistingDatabaseAppUsernamePrefix=${{ vars.EXISTING_DATABASE_APP_USERNAME_PREFIX }} \ + ExistingDatabaseUsernamePrefix=${{ vars.EXISTING_DATABASE_USERNAME_PREFIX }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ diff --git a/CHANGELOG.md b/CHANGELOG.md index 13f7cd3..0dd7088 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to [Semantic 
Versioning](https://semver.org/spec/v2.0.0 ### Added - `ExistingDatabasePort`, `ExistingDatabaseCreateAppUser`, and `ExistingDatabaseCreateSchema` deploy parameters for external DB providers (e.g. TiDB Cloud) -- `ExistingDatabaseAppUsernamePrefix` (AWS) / `existing_db_app_username_prefix` (GCP): optional prefix so the dedicated app user is `{prefix}syncbot_user_{stage}` (required for TiDB Cloud Serverless username rules) +- `ExistingDatabaseUsernamePrefix` (AWS) / `existing_db_username_prefix` (GCP): optional prefix prepended to admin and app DB usernames in the AWS bootstrap Lambda; app user is `{prefix}.syncbot_user_{stage}` (required for TiDB Cloud Serverless username rules). Renamed from `ExistingDatabaseAppUsernamePrefix` / `existing_db_app_username_prefix`. ## [1.0.1] - 2026-03-26 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 7a73612..ccc922d 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -48,7 +48,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). 4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. 5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. -6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). 
**Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseAppUsernamePrefix** (e.g. TiDB Cloud cluster prefix `abc123`; a dot separator is added automatically; app user becomes `{prefix}.syncbot_user_{stage}`), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseUsernamePrefix** (e.g. TiDB Cloud cluster prefix `abc123`; a dot separator is added automatically; prepended to **ExistingDatabaseAdminUser** and the app user `{prefix}.syncbot_user_{stage}` — use bare admin names like `root` when set), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). 
**New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. 7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` @@ -178,7 +178,7 @@ Configure **per-environment** (`test` / `prod`) variables and secrets so they ma | Var | `EXISTING_DATABASE_PORT` | Optional; non-standard TCP port (e.g. `4000`). Empty = engine default in SAM. | | Var | `EXISTING_DATABASE_CREATE_APP_USER` | `true` / `false` (default `true`). Set `false` when the DB cannot create a dedicated app user. | | Var | `EXISTING_DATABASE_CREATE_SCHEMA` | `true` / `false` (default `true`). Set `false` when the database/schema already exists. | -| Var | `EXISTING_DATABASE_APP_USERNAME_PREFIX` | Optional. Provider-specific username prefix (e.g. TiDB Cloud `42bvZAUSurKwhxc.`). Empty for RDS/standard MySQL. | +| Var | `EXISTING_DATABASE_USERNAME_PREFIX` | Optional. Provider-specific username prefix (e.g. TiDB Cloud `abc123`; dot separator added automatically). Prepended to admin and app usernames in the bootstrap Lambda; use bare `EXISTING_DATABASE_ADMIN_USER` (e.g. `root`). Empty for RDS/standard MySQL. | | Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | | Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | | Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 1895fdf..b8c9cb5 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -28,7 +28,7 @@ The application reads configuration from environment variables. 
Providers must i | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. Set explicitly for external providers that use a non-standard port (e.g. TiDB Cloud **4000**). | -| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseAppUsernamePrefix`** so the bootstrap Lambda can create `{prefix}.syncbot_user_{stage}` (a dot separator is added automatically). On GCP with **`existing_db_app_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseUsernamePrefix`** so the bootstrap Lambda prepends it to **ExistingDatabaseAdminUser** and to the dedicated app user `{prefix}.syncbot_user_{stage}` (a dot separator is added automatically; use bare admin names like `root` when set). On GCP with **`existing_db_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. | | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. 
| | `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index eb9d57b..5e259ae 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -154,10 +154,12 @@ def _handler_impl(event, context): ) return - app_username_prefix = (props.get("AppUsernamePrefix") or "").strip() - if app_username_prefix and not app_username_prefix.endswith("."): - app_username_prefix += "." - app_username = f"{app_username_prefix}syncbot_user_{stage}".replace("-", "_") + username_prefix = (props.get("UsernamePrefix") or "").strip() + if username_prefix and not username_prefix.endswith("."): + username_prefix += "." + if username_prefix: + admin_user = f"{username_prefix}{admin_user}" + app_username = f"{username_prefix}syncbot_user_{stage}".replace("-", "_") app_password = "" if create_app_user: try: @@ -334,7 +336,7 @@ def setup_database_postgresql( _safe_ident(schema) if create_app_user: _safe_username(app_username) - _safe_ident(admin_user) + _safe_username(admin_user) conn = psycopg2.connect( host=host, diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index bca2595..36ecf5f 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -420,7 +420,7 @@ configure_github_actions_aws() { # $15 Existing DB port (empty = engine default in SAM) # $16 ExistingDatabaseCreateAppUser: true | false # $17 ExistingDatabaseCreateSchema: true | false - # $18 ExistingDatabaseAppUsernamePrefix (e.g. TiDB cluster prefix; empty for RDS) + # $18 ExistingDatabaseUsernamePrefix (e.g. 
TiDB cluster prefix; empty for RDS) local bootstrap_outputs="$1" local bootstrap_stack_name="$2" local aws_region="$3" @@ -440,7 +440,7 @@ configure_github_actions_aws() { local existing_db_port="${15:-}" local existing_db_create_app_user="${16:-true}" local existing_db_create_schema="${17:-true}" - local existing_db_app_username_prefix="${18:-}" + local existing_db_username_prefix="${18:-}" [[ -z "$existing_db_create_app_user" ]] && existing_db_create_app_user="true" [[ -z "$existing_db_create_schema" ]] && existing_db_create_schema="true" local role bucket boot_region @@ -502,7 +502,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "$existing_db_port" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "$existing_db_create_app_user" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "$existing_db_create_schema" -R "$repo" - gh variable set EXISTING_DATABASE_APP_USERNAME_PREFIX --env "$env_name" --body "$existing_db_app_username_prefix" -R "$repo" + gh variable set EXISTING_DATABASE_USERNAME_PREFIX --env "$env_name" --body "$existing_db_username_prefix" -R "$repo" else # Clear existing-host vars for new-RDS mode to avoid stale CI config. gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" @@ -513,7 +513,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_PORT --env "$env_name" --body "" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "true" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "true" -R "$repo" - gh variable set EXISTING_DATABASE_APP_USERNAME_PREFIX --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_USERNAME_PREFIX --env "$env_name" --body "" -R "$repo" fi echo "Environment variables updated for '$env_name'." 
fi @@ -1296,7 +1296,7 @@ PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" PREV_EXISTING_DATABASE_PORT="" PREV_EXISTING_DATABASE_CREATE_APP_USER="" PREV_EXISTING_DATABASE_CREATE_SCHEMA="" -PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX="" +PREV_EXISTING_DATABASE_USERNAME_PREFIX="" PREV_DATABASE_ENGINE="" PREV_DATABASE_SCHEMA="" PREV_LOG_LEVEL="" @@ -1328,7 +1328,7 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_EXISTING_DATABASE_PORT="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabasePort")" PREV_EXISTING_DATABASE_CREATE_APP_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateAppUser")" PREV_EXISTING_DATABASE_CREATE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateSchema")" - PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseAppUsernamePrefix")" + PREV_EXISTING_DATABASE_USERNAME_PREFIX="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseUsernamePrefix")" PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" @@ -1478,7 +1478,7 @@ ENV_EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" ENV_EXISTING_DATABASE_PORT="${EXISTING_DATABASE_PORT:-}" ENV_EXISTING_DATABASE_CREATE_APP_USER="${EXISTING_DATABASE_CREATE_APP_USER:-}" ENV_EXISTING_DATABASE_CREATE_SCHEMA="${EXISTING_DATABASE_CREATE_SCHEMA:-}" -ENV_EXISTING_DATABASE_APP_USERNAME_PREFIX="${EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" +ENV_EXISTING_DATABASE_USERNAME_PREFIX="${EXISTING_DATABASE_USERNAME_PREFIX:-}" EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" EXISTING_DATABASE_HOST="" EXISTING_DATABASE_ADMIN_USER="" @@ -1489,7 +1489,7 @@ EXISTING_DATABASE_LAMBDA_SG_ID="" EXISTING_DATABASE_PORT="" EXISTING_DATABASE_CREATE_APP_USER="true" 
EXISTING_DATABASE_CREATE_SCHEMA="true" -EXISTING_DATABASE_APP_USERNAME_PREFIX="" +EXISTING_DATABASE_USERNAME_PREFIX="" EXISTING_DB_EFFECTIVE_PORT="" DATABASE_SCHEMA="" DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" @@ -1590,13 +1590,13 @@ if [[ "$DB_MODE" == "2" ]]; then "$CREATE_SCHEMA_DEFAULT" \ bool)" - EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="" - [[ -n "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" ]] && EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT="$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" - EXISTING_DATABASE_APP_USERNAME_PREFIX="$(resolve_with_conflict_check \ - "App username prefix (e.g. abc123 for TiDB Cloud; blank for RDS/standard)" \ - "$ENV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ - "$PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX" \ - "$EXISTING_DATABASE_APP_USERNAME_PREFIX_DEFAULT")" + EXISTING_DATABASE_USERNAME_PREFIX_DEFAULT="" + [[ -n "$PREV_EXISTING_DATABASE_USERNAME_PREFIX" ]] && EXISTING_DATABASE_USERNAME_PREFIX_DEFAULT="$PREV_EXISTING_DATABASE_USERNAME_PREFIX" + EXISTING_DATABASE_USERNAME_PREFIX="$(resolve_with_conflict_check \ + "DB username prefix (e.g. abc123 for TiDB Cloud; blank for RDS/standard)" \ + "$ENV_EXISTING_DATABASE_USERNAME_PREFIX" \ + "$PREV_EXISTING_DATABASE_USERNAME_PREFIX" \ + "$EXISTING_DATABASE_USERNAME_PREFIX_DEFAULT")" if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then echo "Error: valid ExistingDatabaseHost is required for existing DB mode." 
>&2 @@ -1776,10 +1776,17 @@ if [[ "$DB_MODE" == "2" ]]; then echo "DB port: ${EXISTING_DB_EFFECTIVE_PORT:-engine default}" echo "DB create user: $EXISTING_DATABASE_CREATE_APP_USER" echo "DB create schema: $EXISTING_DATABASE_CREATE_SCHEMA" - if [[ -n "$EXISTING_DATABASE_APP_USERNAME_PREFIX" ]]; then - echo "DB app user prefix: $EXISTING_DATABASE_APP_USERNAME_PREFIX (app user: ${EXISTING_DATABASE_APP_USERNAME_PREFIX}syncbot_user_${STAGE//-/_})" + echo "DB admin user (parameter): $EXISTING_DATABASE_ADMIN_USER" + if [[ -n "$EXISTING_DATABASE_USERNAME_PREFIX" ]]; then + _dbpfx="$EXISTING_DATABASE_USERNAME_PREFIX" + [[ "$_dbpfx" != *. ]] && _dbpfx="${_dbpfx}." + echo "DB username prefix: $EXISTING_DATABASE_USERNAME_PREFIX" + echo " effective admin (bootstrap): ${_dbpfx}${EXISTING_DATABASE_ADMIN_USER}" + echo " effective app user (if created): ${_dbpfx}syncbot_user_${STAGE//-/_}" else - echo "DB app user prefix: (none — app user: syncbot_user_${STAGE//-/_})" + echo "DB username prefix: (none)" + echo " admin (bootstrap): $EXISTING_DATABASE_ADMIN_USER" + echo " app user (if created): syncbot_user_${STAGE//-/_}" fi echo "DB schema: $DATABASE_SCHEMA" else @@ -1860,7 +1867,7 @@ if [[ "$DB_MODE" == "2" ]]; then PARAMS+=( "ExistingDatabaseCreateAppUser=$EXISTING_DATABASE_CREATE_APP_USER" "ExistingDatabaseCreateSchema=$EXISTING_DATABASE_CREATE_SCHEMA" - "ExistingDatabaseAppUsernamePrefix=$EXISTING_DATABASE_APP_USERNAME_PREFIX" + "ExistingDatabaseUsernamePrefix=$EXISTING_DATABASE_USERNAME_PREFIX" ) else # Clear existing-host parameters on updates to avoid stale previous values. 
@@ -1875,7 +1882,7 @@ else "ParameterKey=ExistingDatabasePort,ParameterValue=" "ExistingDatabaseCreateAppUser=true" "ExistingDatabaseCreateSchema=true" - "ParameterKey=ExistingDatabaseAppUsernamePrefix,ParameterValue=" + "ParameterKey=ExistingDatabaseUsernamePrefix,ParameterValue=" ) fi @@ -1923,7 +1930,7 @@ else EXISTING_DATABASE_PORT="${PREV_EXISTING_DATABASE_PORT:-}" EXISTING_DATABASE_CREATE_APP_USER="${PREV_EXISTING_DATABASE_CREATE_APP_USER:-true}" EXISTING_DATABASE_CREATE_SCHEMA="${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-true}" - EXISTING_DATABASE_APP_USERNAME_PREFIX="${PREV_EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" + EXISTING_DATABASE_USERNAME_PREFIX="${PREV_EXISTING_DATABASE_USERNAME_PREFIX:-}" [[ -z "$EXISTING_DATABASE_CREATE_APP_USER" ]] && EXISTING_DATABASE_CREATE_APP_USER="true" [[ -z "$EXISTING_DATABASE_CREATE_SCHEMA" ]] && EXISTING_DATABASE_CREATE_SCHEMA="true" SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" @@ -1976,7 +1983,7 @@ if [[ "$TASK_CICD" == "true" ]]; then "${EXISTING_DATABASE_PORT:-}" \ "${EXISTING_DATABASE_CREATE_APP_USER:-true}" \ "${EXISTING_DATABASE_CREATE_SCHEMA:-true}" \ - "${EXISTING_DATABASE_APP_USERNAME_PREFIX:-}" + "${EXISTING_DATABASE_USERNAME_PREFIX:-}" fi if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 3936a51..ba6390c 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -90,6 +90,7 @@ Parameters: Description: > Database admin user that can create databases and users (e.g. RDS master). Used only when ExistingDatabaseHost is set; the deploy creates a dedicated app user and schema. + When ExistingDatabaseUsernamePrefix is set, use the bare name (e.g. root); the prefix is prepended in the bootstrap Lambda. 
Type: String Default: "" @@ -153,10 +154,11 @@ Parameters: - "true" - "false" - ExistingDatabaseAppUsernamePrefix: + ExistingDatabaseUsernamePrefix: Description: > Username prefix required by some managed DB providers (e.g. TiDB Cloud cluster prefix "abc123"). - A dot separator is added automatically if missing. The app username becomes {prefix}.syncbot_user_{Stage}. + A dot separator is added automatically if missing. Prepended to ExistingDatabaseAdminUser and to the + dedicated app user ({prefix}.syncbot_user_{Stage}). Use the bare admin name (e.g. root) when this is set. Leave empty for standard databases (RDS, self-hosted). Ignored when creating new RDS in stack. Type: String Default: "" @@ -681,9 +683,9 @@ Resources: - UseExistingDatabase - !Ref ExistingDatabaseCreateSchema - "true" - AppUsernamePrefix: !If + UsernamePrefix: !If - UseExistingDatabase - - !Ref ExistingDatabaseAppUsernamePrefix + - !Ref ExistingDatabaseUsernamePrefix - "" # ============================================================ diff --git a/infra/gcp/README.md b/infra/gcp/README.md index d6d7ff1..f7dad66 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -48,7 +48,7 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infras | `stage` | Stage name, e.g. `test` or `prod` | | `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | | `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | -| `existing_db_app_username_prefix` | Optional (e.g. TiDB Cloud `abc123`). A dot separator is added automatically. When set, `DATABASE_USER` is `{prefix}.syncbot_user_{stage}` and `existing_db_user` is ignored | +| `existing_db_username_prefix` | Optional (e.g. TiDB Cloud `abc123`). A dot separator is added automatically. 
When set, `DATABASE_USER` is `{prefix}.syncbot_user_{stage}` and `existing_db_user` is ignored | | `cloud_run_image` | Container image URL for Cloud Run (set after first build) | | `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). | | `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. | diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 1452418..0ef39b7 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -46,8 +46,8 @@ locals { db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" stage_syncbot_user = "syncbot_user_${replace(var.stage, "-", "_")}" normalized_prefix = ( - trimspace(var.existing_db_app_username_prefix) != "" - ? (endswith(trimspace(var.existing_db_app_username_prefix), ".") ? trimspace(var.existing_db_app_username_prefix) : "${trimspace(var.existing_db_app_username_prefix)}.") + trimspace(var.existing_db_username_prefix) != "" + ? (endswith(trimspace(var.existing_db_username_prefix), ".") ? trimspace(var.existing_db_username_prefix) : "${trimspace(var.existing_db_username_prefix)}.") : "" ) db_user = var.use_existing_database ? 
( diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 10f6ba4..26273dc 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -605,8 +605,8 @@ fi if [[ "$USE_EXISTING" == "true" ]]; then EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" - EXISTING_DB_APP_USERNAME_PREFIX="$(prompt_line "App username prefix (optional; e.g. TiDB Cloud abc123; blank = enter full DB user next)" "")" - if [[ -n "$EXISTING_DB_APP_USERNAME_PREFIX" ]]; then + EXISTING_DB_USERNAME_PREFIX="$(prompt_line "DB username prefix (optional; e.g. TiDB Cloud abc123; blank = enter full DB user next)" "")" + if [[ -n "$EXISTING_DB_USERNAME_PREFIX" ]]; then EXISTING_USER="" else EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" @@ -615,8 +615,8 @@ if [[ "$USE_EXISTING" == "true" ]]; then echo "Error: Existing DB host is required when using existing database mode." >&2 exit 1 fi - if [[ -z "$EXISTING_USER" && -z "$EXISTING_DB_APP_USERNAME_PREFIX" ]]; then - echo "Error: Database user or app username prefix is required when using existing database mode." >&2 + if [[ -z "$EXISTING_USER" && -z "$EXISTING_DB_USERNAME_PREFIX" ]]; then + echo "Error: Database user or DB username prefix is required when using existing database mode." 
>&2 exit 1 fi @@ -761,12 +761,12 @@ if [[ "$USE_EXISTING" == "true" ]]; then VARS+=("-var=existing_db_host=$EXISTING_HOST") VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") VARS+=("-var=existing_db_user=$EXISTING_USER") - VARS+=("-var=existing_db_app_username_prefix=$EXISTING_DB_APP_USERNAME_PREFIX") + VARS+=("-var=existing_db_username_prefix=$EXISTING_DB_USERNAME_PREFIX") VARS+=("-var=existing_db_create_app_user=$EXISTING_DB_CREATE_APP_USER") VARS+=("-var=existing_db_create_schema=$EXISTING_DB_CREATE_SCHEMA") else VARS+=("-var=use_existing_database=false") - VARS+=("-var=existing_db_app_username_prefix=") + VARS+=("-var=existing_db_username_prefix=") fi VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 88c83a8..b6fdcf3 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -45,13 +45,13 @@ variable "existing_db_schema" { variable "existing_db_user" { type = string default = "" - description = "Existing MySQL user (when use_existing_database = true). Ignored when existing_db_app_username_prefix is set (DATABASE_USER becomes prefix + syncbot_user_{stage})." + description = "Existing MySQL user (when use_existing_database = true). Ignored when existing_db_username_prefix is set (DATABASE_USER becomes prefix + syncbot_user_{stage})." } -variable "existing_db_app_username_prefix" { +variable "existing_db_username_prefix" { type = string default = "" - description = "Optional prefix for app DB username (e.g. TiDB Cloud cluster prefix \"abc123\"). A dot separator is added automatically. When non-empty, DATABASE_USER is {prefix}.syncbot_user_{stage} and existing_db_user is ignored." + description = "Optional prefix for DATABASE_USER (e.g. TiDB Cloud cluster prefix \"abc123\"). A dot separator is added automatically. When non-empty, DATABASE_USER is {prefix}.syncbot_user_{stage} and existing_db_user is ignored." 
} variable "existing_db_create_app_user" { diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index aee319c..26973b7 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -105,8 +105,8 @@ def test_safe_ident_rejects_dots(): handler._safe_ident("bad.schema") -def test_handler_app_username_prefix_with_dot(cfn_create_event): - cfn_create_event["ResourceProperties"]["AppUsernamePrefix"] = "pre." +def test_handler_username_prefix_with_dot(cfn_create_event): + cfn_create_event["ResourceProperties"]["UsernamePrefix"] = "pre." handler = _fresh_handler() with ( patch.object(handler, "send"), @@ -117,10 +117,11 @@ def test_handler_app_username_prefix_with_dot(cfn_create_event): ): handler._handler_impl(cfn_create_event, MagicMock()) assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + assert mock_mysql.call_args[1]["admin_user"] == "pre.admin" -def test_handler_app_username_prefix_without_dot(cfn_create_event): - cfn_create_event["ResourceProperties"]["AppUsernamePrefix"] = "pre" +def test_handler_username_prefix_without_dot(cfn_create_event): + cfn_create_event["ResourceProperties"]["UsernamePrefix"] = "pre" handler = _fresh_handler() with ( patch.object(handler, "send"), @@ -131,6 +132,23 @@ def test_handler_app_username_prefix_without_dot(cfn_create_event): ): handler._handler_impl(cfn_create_event, MagicMock()) assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + assert mock_mysql.call_args[1]["admin_user"] == "pre.admin" + + +def test_handler_username_prefix_applied_to_bare_root_admin(cfn_create_event): + cfn_create_event["ResourceProperties"]["AdminUser"] = "root" + cfn_create_event["ResourceProperties"]["UsernamePrefix"] = "cluster" + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + 
patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + assert mock_mysql.call_args[1]["admin_user"] == "cluster.root" + assert mock_mysql.call_args[1]["app_username"] == "cluster.syncbot_user_test" def test_handler_custom_port_passed_to_tcp_and_mysql(cfn_create_event): From a05f13d49e4fa2ddea98e4e7f2f89d53f0a858bd Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 28 Mar 2026 16:00:16 -0500 Subject: [PATCH 19/19] Adjustment for app DB username being too long. --- .github/workflows/deploy-aws.yml | 2 ++ CHANGELOG.md | 4 +-- docs/DEPLOYMENT.md | 7 ++--- docs/INFRA_CONTRACT.md | 2 +- infra/aws/db_setup/handler.py | 6 ++++- infra/aws/scripts/deploy.sh | 45 +++++++++++++++++++++++++++----- infra/aws/template.yaml | 21 ++++++++++++--- infra/gcp/README.md | 3 ++- infra/gcp/main.tf | 6 +++-- infra/gcp/scripts/deploy.sh | 8 ++++-- infra/gcp/variables.tf | 10 +++++-- tests/test_db_setup.py | 24 ++++++++++++++--- 12 files changed, 109 insertions(+), 29 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 173ad92..eb2e4c0 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -107,6 +107,7 @@ jobs: ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ ExistingDatabaseUsernamePrefix=${{ vars.EXISTING_DATABASE_USERNAME_PREFIX }} \ + ExistingDatabaseAppUsername=${{ vars.EXISTING_DATABASE_APP_USERNAME }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ @@ -186,6 +187,7 @@ jobs: ExistingDatabaseCreateAppUser=${{ vars.EXISTING_DATABASE_CREATE_APP_USER || 'true' }} \ ExistingDatabaseCreateSchema=${{ vars.EXISTING_DATABASE_CREATE_SCHEMA || 'true' }} \ ExistingDatabaseUsernamePrefix=${{ vars.EXISTING_DATABASE_USERNAME_PREFIX }} \ + 
ExistingDatabaseAppUsername=${{ vars.EXISTING_DATABASE_APP_USERNAME }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ diff --git a/CHANGELOG.md b/CHANGELOG.md index fb15d66..2c27bd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,11 +9,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added -- `ExistingDatabasePort`, `ExistingDatabaseCreateAppUser`, and `ExistingDatabaseCreateSchema` deploy parameters for external DB providers (e.g. TiDB Cloud) -- `ExistingDatabaseUsernamePrefix` (AWS) / `existing_db_username_prefix` (GCP): optional prefix prepended to admin and app DB usernames in the AWS bootstrap Lambda; app user is `{prefix}.syncbot_user_{stage}` (required for TiDB Cloud Serverless username rules). Renamed from `ExistingDatabaseAppUsernamePrefix` / `existing_db_app_username_prefix`. +- External DB deploy parameters: `ExistingDatabasePort`, `ExistingDatabaseCreateAppUser`, `ExistingDatabaseCreateSchema`, `ExistingDatabaseUsernamePrefix`, `ExistingDatabaseAppUsername` (AWS) / GCP equivalents — support TiDB Cloud and other managed DB providers with cluster-prefixed usernames and 32-char limits ### Changed +- Shortened default DB usernames: `sbadmin_{stage}` (was `syncbot_admin_{stage}`), `sbapp_{stage}` (was `syncbot_user_{stage}`). Existing RDS instances keep their original master username. 
- Bumped GitHub Actions: `actions/checkout` v6, `actions/setup-python` v6, `actions/upload-artifact` v7, `actions/download-artifact` v8, `aws-actions/configure-aws-credentials` v6 - Dependabot: ignore semver-major updates for the Docker `python` image (keeps base image on Python 3.12.x line) - AWS Lambda: Alembic migrations now run via a post-deploy invoke instead of on every cold start, fixing Slack ack timeouts after deployment; Cloud Run and local dev unchanged diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index f3224cc..566e272 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -48,7 +48,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). 4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. 5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. -6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseUsernamePrefix** (e.g. 
TiDB Cloud cluster prefix `abc123`; a dot separator is added automatically; prepended to **ExistingDatabaseAdminUser** and the app user `{prefix}.syncbot_user_{stage}` — use bare admin names like `root` when set), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, optional **ExistingDatabasePort** (blank = engine default; use for non-standard ports e.g. TiDB **4000**), optional **ExistingDatabaseUsernamePrefix** (e.g. TiDB Cloud cluster prefix `abc123`; a dot separator is added automatically; prepended to **ExistingDatabaseAdminUser** and the default app user `{prefix}.sbapp_{stage}` — use bare admin names like `root` when set), optional **ExistingDatabaseAppUsername** (full app username override when the default would exceed provider limits, e.g. MySQL 32 chars), whether to **create a dedicated app DB user** and whether to run **`CREATE DATABASE IF NOT EXISTS`**, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight** using the effective DB port). 
**New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. 7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` @@ -189,7 +189,8 @@ Configure **per-environment** (`test` / `prod`) variables and secrets so they ma | Var | `EXISTING_DATABASE_PORT` | Optional; non-standard TCP port (e.g. `4000`). Empty = engine default in SAM. | | Var | `EXISTING_DATABASE_CREATE_APP_USER` | `true` / `false` (default `true`). Set `false` when the DB cannot create a dedicated app user. | | Var | `EXISTING_DATABASE_CREATE_SCHEMA` | `true` / `false` (default `true`). Set `false` when the database/schema already exists. | -| Var | `EXISTING_DATABASE_USERNAME_PREFIX` | Optional. Provider-specific username prefix (e.g. TiDB Cloud `abc123`; dot separator added automatically). Prepended to admin and app usernames in the bootstrap Lambda; use bare `EXISTING_DATABASE_ADMIN_USER` (e.g. `root`). Empty for RDS/standard MySQL. | +| Var | `EXISTING_DATABASE_USERNAME_PREFIX` | Optional. Provider-specific username prefix (e.g. TiDB Cloud `abc123`; dot separator added automatically). Prepended to admin and default app user `{prefix}.sbapp_{stage}` in the bootstrap Lambda; use bare `EXISTING_DATABASE_ADMIN_USER` (e.g. `root`). Empty for RDS/standard MySQL. | +| Var | `EXISTING_DATABASE_APP_USERNAME` | Optional. Full dedicated app DB username (bypasses prefix + default `sbapp_{stage}`). Use if the auto name exceeds provider limits. Empty = default. 
| | Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | | Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | | Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | @@ -242,7 +243,7 @@ Build and push an image to Artifact Registry, then `gcloud run deploy` or `terra ## Using an existing RDS host (AWS) -When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource can create the schema and optionally a dedicated app user (`syncbot_user_`) with a generated app password in Secrets Manager—or skip user/schema steps and copy the admin password into the app secret when **`ExistingDatabaseCreateAppUser=false`**. +When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource can create the schema and optionally a dedicated app user (default `sbapp_`, or **ExistingDatabaseAppUsername** if set) with a generated app password in Secrets Manager—or skip user/schema steps and copy the admin password into the app secret when **`ExistingDatabaseCreateAppUser=false`**. - **Public:** Lambda is not in your VPC; the DB must be reachable on the Internet on the configured port (**`ExistingDatabasePort`**, or **3306** / **5432** by engine). - **Private:** Lambda uses `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`; DB security group must allow the Lambda SG; subnets need **NAT** egress for Slack API calls. diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index fe911e3..afa63a0 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -28,7 +28,7 @@ The application reads configuration from environment variables. Providers must i | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). 
Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. Set explicitly for external providers that use a non-standard port (e.g. TiDB Cloud **4000**). | -| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseUsernamePrefix`** so the bootstrap Lambda prepends it to **ExistingDatabaseAdminUser** and to the dedicated app user `{prefix}.syncbot_user_{stage}` (a dot separator is added automatically; use bare admin names like `root` when set). On GCP with **`existing_db_username_prefix`** set, Terraform sets `DATABASE_USER` the same way and ignores **`existing_db_user`**. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. Some providers (e.g. TiDB Cloud Serverless) require a cluster-specific prefix on every SQL user; AWS SAM exposes **`ExistingDatabaseUsernamePrefix`** so the bootstrap Lambda prepends it to **ExistingDatabaseAdminUser** and to the default app user `{prefix}.sbapp_{stage}` (a dot separator is added automatically; use bare admin names like `root` when set). **`ExistingDatabaseAppUsername`** (AWS) / **`existing_db_app_username`** (GCP) optionally sets the full app username and bypasses prefix + default. New stack-managed RDS uses master **`sbadmin_{stage}`** and app **`sbapp_{stage}`**. | | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | | `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. 
| diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index 72a0496..1eff719 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -157,7 +157,11 @@ def _handler_impl(event, context): username_prefix += "." if username_prefix: admin_user = f"{username_prefix}{admin_user}" - app_username = f"{username_prefix}syncbot_user_{stage}".replace("-", "_") + app_username_override = (props.get("AppUsername") or "").strip() + if app_username_override: + app_username = app_username_override + else: + app_username = f"{username_prefix}sbapp_{stage}".replace("-", "_") app_password = "" if create_app_user: try: diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index f6dba11..1ff50bd 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -421,6 +421,7 @@ configure_github_actions_aws() { # $16 ExistingDatabaseCreateAppUser: true | false # $17 ExistingDatabaseCreateSchema: true | false # $18 ExistingDatabaseUsernamePrefix (e.g. 
TiDB cluster prefix; empty for RDS) + # $19 ExistingDatabaseAppUsername (optional full app DB user; empty = default) local bootstrap_outputs="$1" local bootstrap_stack_name="$2" local aws_region="$3" @@ -441,6 +442,7 @@ configure_github_actions_aws() { local existing_db_create_app_user="${16:-true}" local existing_db_create_schema="${17:-true}" local existing_db_username_prefix="${18:-}" + local existing_db_app_username="${19:-}" [[ -z "$existing_db_create_app_user" ]] && existing_db_create_app_user="true" [[ -z "$existing_db_create_schema" ]] && existing_db_create_schema="true" local role bucket boot_region @@ -503,6 +505,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "$existing_db_create_app_user" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "$existing_db_create_schema" -R "$repo" gh variable set EXISTING_DATABASE_USERNAME_PREFIX --env "$env_name" --body "$existing_db_username_prefix" -R "$repo" + gh variable set EXISTING_DATABASE_APP_USERNAME --env "$env_name" --body "$existing_db_app_username" -R "$repo" else # Clear existing-host vars for new-RDS mode to avoid stale CI config. gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" @@ -514,6 +517,7 @@ configure_github_actions_aws() { gh variable set EXISTING_DATABASE_CREATE_APP_USER --env "$env_name" --body "true" -R "$repo" gh variable set EXISTING_DATABASE_CREATE_SCHEMA --env "$env_name" --body "true" -R "$repo" gh variable set EXISTING_DATABASE_USERNAME_PREFIX --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_APP_USERNAME --env "$env_name" --body "" -R "$repo" fi echo "Environment variables updated for '$env_name'." 
fi @@ -1297,6 +1301,7 @@ PREV_EXISTING_DATABASE_PORT="" PREV_EXISTING_DATABASE_CREATE_APP_USER="" PREV_EXISTING_DATABASE_CREATE_SCHEMA="" PREV_EXISTING_DATABASE_USERNAME_PREFIX="" +PREV_EXISTING_DATABASE_APP_USERNAME="" PREV_DATABASE_ENGINE="" PREV_DATABASE_SCHEMA="" PREV_LOG_LEVEL="" @@ -1329,6 +1334,7 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_EXISTING_DATABASE_CREATE_APP_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateAppUser")" PREV_EXISTING_DATABASE_CREATE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseCreateSchema")" PREV_EXISTING_DATABASE_USERNAME_PREFIX="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseUsernamePrefix")" + PREV_EXISTING_DATABASE_APP_USERNAME="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseAppUsername")" PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" @@ -1479,6 +1485,7 @@ ENV_EXISTING_DATABASE_PORT="${EXISTING_DATABASE_PORT:-}" ENV_EXISTING_DATABASE_CREATE_APP_USER="${EXISTING_DATABASE_CREATE_APP_USER:-}" ENV_EXISTING_DATABASE_CREATE_SCHEMA="${EXISTING_DATABASE_CREATE_SCHEMA:-}" ENV_EXISTING_DATABASE_USERNAME_PREFIX="${EXISTING_DATABASE_USERNAME_PREFIX:-}" +ENV_EXISTING_DATABASE_APP_USERNAME="${EXISTING_DATABASE_APP_USERNAME:-}" EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" EXISTING_DATABASE_HOST="" EXISTING_DATABASE_ADMIN_USER="" @@ -1490,6 +1497,7 @@ EXISTING_DATABASE_PORT="" EXISTING_DATABASE_CREATE_APP_USER="true" EXISTING_DATABASE_CREATE_SCHEMA="true" EXISTING_DATABASE_USERNAME_PREFIX="" +EXISTING_DATABASE_APP_USERNAME="" EXISTING_DB_EFFECTIVE_PORT="" DATABASE_SCHEMA="" DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" @@ -1598,6 +1606,14 @@ if [[ "$DB_MODE" == "2" ]]; then "$PREV_EXISTING_DATABASE_USERNAME_PREFIX" \ 
"$EXISTING_DATABASE_USERNAME_PREFIX_DEFAULT")" + EXISTING_DATABASE_APP_USERNAME_DEFAULT="" + [[ -n "$PREV_EXISTING_DATABASE_APP_USERNAME" ]] && EXISTING_DATABASE_APP_USERNAME_DEFAULT="$PREV_EXISTING_DATABASE_APP_USERNAME" + EXISTING_DATABASE_APP_USERNAME="$(resolve_with_conflict_check \ + "ExistingDatabaseAppUsername (optional; full app user, bypasses prefix+sbapp_{stage}; blank for default)" \ + "$ENV_EXISTING_DATABASE_APP_USERNAME" \ + "$PREV_EXISTING_DATABASE_APP_USERNAME" \ + "$EXISTING_DATABASE_APP_USERNAME_DEFAULT")" + if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then echo "Error: valid ExistingDatabaseHost is required for existing DB mode." >&2 exit 1 @@ -1691,8 +1707,8 @@ else echo echo "=== New RDS Database ===" echo "New RDS mode uses:" - echo " - admin user: syncbot_admin_${STAGE} (password auto-generated)" - echo " - app user: syncbot_user_${STAGE} (password auto-generated)" + echo " - admin user: sbadmin_${STAGE} (password auto-generated)" + echo " - app user: sbapp_${STAGE} (password auto-generated)" DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" fi @@ -1777,23 +1793,34 @@ if [[ "$DB_MODE" == "2" ]]; then echo "DB create user: $EXISTING_DATABASE_CREATE_APP_USER" echo "DB create schema: $EXISTING_DATABASE_CREATE_SCHEMA" echo "DB admin user (parameter): $EXISTING_DATABASE_ADMIN_USER" + if [[ -n "$EXISTING_DATABASE_APP_USERNAME" ]]; then + echo "DB app username override: $EXISTING_DATABASE_APP_USERNAME" + fi if [[ -n "$EXISTING_DATABASE_USERNAME_PREFIX" ]]; then _dbpfx="$EXISTING_DATABASE_USERNAME_PREFIX" [[ "$_dbpfx" != *. ]] && _dbpfx="${_dbpfx}." 
echo "DB username prefix: $EXISTING_DATABASE_USERNAME_PREFIX" echo " effective admin (bootstrap): ${_dbpfx}${EXISTING_DATABASE_ADMIN_USER}" - echo " effective app user (if created): ${_dbpfx}syncbot_user_${STAGE//-/_}" + if [[ -n "$EXISTING_DATABASE_APP_USERNAME" ]]; then + echo " effective app user (if created): $EXISTING_DATABASE_APP_USERNAME (override)" + else + echo " effective app user (if created): ${_dbpfx}sbapp_${STAGE//-/_}" + fi else echo "DB username prefix: (none)" echo " admin (bootstrap): $EXISTING_DATABASE_ADMIN_USER" - echo " app user (if created): syncbot_user_${STAGE//-/_}" + if [[ -n "$EXISTING_DATABASE_APP_USERNAME" ]]; then + echo " app user (if created): $EXISTING_DATABASE_APP_USERNAME (override)" + else + echo " app user (if created): sbapp_${STAGE//-/_}" + fi fi echo "DB schema: $DATABASE_SCHEMA" else echo "DB mode: create new RDS" echo "DB engine: $DATABASE_ENGINE" - echo "DB admin user: syncbot_admin_${STAGE} (auto password)" - echo "DB app user: syncbot_user_${STAGE} (auto password)" + echo "DB admin user: sbadmin_${STAGE} (auto password)" + echo "DB app user: sbapp_${STAGE} (auto password)" echo "DB schema: $DATABASE_SCHEMA" fi if [[ -n "$TOKEN_OVERRIDE" ]]; then @@ -1868,6 +1895,7 @@ if [[ "$DB_MODE" == "2" ]]; then "ExistingDatabaseCreateAppUser=$EXISTING_DATABASE_CREATE_APP_USER" "ExistingDatabaseCreateSchema=$EXISTING_DATABASE_CREATE_SCHEMA" "ExistingDatabaseUsernamePrefix=$EXISTING_DATABASE_USERNAME_PREFIX" + "ExistingDatabaseAppUsername=$EXISTING_DATABASE_APP_USERNAME" ) else # Clear existing-host parameters on updates to avoid stale previous values. 
@@ -1883,6 +1911,7 @@ else "ExistingDatabaseCreateAppUser=true" "ExistingDatabaseCreateSchema=true" "ParameterKey=ExistingDatabaseUsernamePrefix,ParameterValue=" + "ParameterKey=ExistingDatabaseAppUsername,ParameterValue=" ) fi @@ -1946,6 +1975,7 @@ else EXISTING_DATABASE_CREATE_APP_USER="${PREV_EXISTING_DATABASE_CREATE_APP_USER:-true}" EXISTING_DATABASE_CREATE_SCHEMA="${PREV_EXISTING_DATABASE_CREATE_SCHEMA:-true}" EXISTING_DATABASE_USERNAME_PREFIX="${PREV_EXISTING_DATABASE_USERNAME_PREFIX:-}" + EXISTING_DATABASE_APP_USERNAME="${PREV_EXISTING_DATABASE_APP_USERNAME:-}" [[ -z "$EXISTING_DATABASE_CREATE_APP_USER" ]] && EXISTING_DATABASE_CREATE_APP_USER="true" [[ -z "$EXISTING_DATABASE_CREATE_SCHEMA" ]] && EXISTING_DATABASE_CREATE_SCHEMA="true" SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" @@ -1998,7 +2028,8 @@ if [[ "$TASK_CICD" == "true" ]]; then "${EXISTING_DATABASE_PORT:-}" \ "${EXISTING_DATABASE_CREATE_APP_USER:-true}" \ "${EXISTING_DATABASE_CREATE_SCHEMA:-true}" \ - "${EXISTING_DATABASE_USERNAME_PREFIX:-}" + "${EXISTING_DATABASE_USERNAME_PREFIX:-}" \ + "${EXISTING_DATABASE_APP_USERNAME:-}" fi if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index ec5699f..b223272 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -158,11 +158,20 @@ Parameters: Description: > Username prefix required by some managed DB providers (e.g. TiDB Cloud cluster prefix "abc123"). A dot separator is added automatically if missing. Prepended to ExistingDatabaseAdminUser and to the - dedicated app user ({prefix}.syncbot_user_{Stage}). Use the bare admin name (e.g. root) when this is set. + default app user ({prefix}.sbapp_{Stage}) unless ExistingDatabaseAppUsername is set. + Use the bare admin name (e.g. root) when this is set. Leave empty for standard databases (RDS, self-hosted). Ignored when creating new RDS in stack. 
Type: String Default: "" + ExistingDatabaseAppUsername: + Description: > + When ExistingDatabaseHost is set and CreateAppUser is true: optional full app DB username (bypasses + prefix + default sbapp_{Stage}). Use when the auto-generated name exceeds provider limits (e.g. MySQL 32 chars) + or you need a fixed name. Leave empty for default {prefix}.sbapp_{Stage}. Ignored when creating new RDS in stack. + Type: String + Default: "" + DatabaseSchema: Description: > Database/schema name for MySQL or PostgreSQL. Each app sharing an RDS instance @@ -502,7 +511,7 @@ Resources: Engine: mysql # Minor version must match cfn-lint / RDS allowed list (major-only "8.0" fails E3691) EngineVersion: "8.0.40" - MasterUsername: !Sub "syncbot_admin_${Stage}" + MasterUsername: !Sub "sbadmin_${Stage}" ManageMasterUserPassword: true DBName: !Ref DatabaseSchema AllocatedStorage: 20 @@ -535,7 +544,7 @@ Resources: DBInstanceClass: !Ref DatabaseInstanceClass Engine: postgres EngineVersion: "16.6" - MasterUsername: !Sub "syncbot_admin_${Stage}" + MasterUsername: !Sub "sbadmin_${Stage}" ManageMasterUserPassword: true DBName: !Ref DatabaseSchema AllocatedStorage: 20 @@ -649,7 +658,7 @@ Resources: AdminUser: !If - UseExistingDatabase - !Ref ExistingDatabaseAdminUser - - !Sub "syncbot_admin_${Stage}" + - !Sub "sbadmin_${Stage}" AdminPassword: !If - UseExistingDatabase - !Ref ExistingDatabaseAdminPassword @@ -687,6 +696,10 @@ Resources: - UseExistingDatabase - !Ref ExistingDatabaseUsernamePrefix - "" + AppUsername: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAppUsername + - "" # ============================================================ # Lambda Function diff --git a/infra/gcp/README.md b/infra/gcp/README.md index f7dad66..2fea415 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -48,7 +48,8 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infras | `stage` | Stage name, e.g. 
`test` or `prod` | | `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | | `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | -| `existing_db_username_prefix` | Optional (e.g. TiDB Cloud `abc123`). A dot separator is added automatically. When set, `DATABASE_USER` is `{prefix}.syncbot_user_{stage}` and `existing_db_user` is ignored | +| `existing_db_username_prefix` | Optional (e.g. TiDB Cloud `abc123`). A dot separator is added automatically. When set, `DATABASE_USER` is `{prefix}.sbapp_{stage}` unless `existing_db_app_username` is set; `existing_db_user` is ignored | +| `existing_db_app_username` | Optional full `DATABASE_USER` (bypasses prefix + `sbapp_{stage}` and `existing_db_user`) | | `cloud_run_image` | Container image URL for Cloud Run (set after first build) | | `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). | | `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. | diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 0ef39b7..fb2c46a 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -44,14 +44,16 @@ locals { length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "" ) db_schema = var.use_existing_database ? 
var.existing_db_schema : "syncbot" - stage_syncbot_user = "syncbot_user_${replace(var.stage, "-", "_")}" + stage_sbapp_user = "sbapp_${replace(var.stage, "-", "_")}" normalized_prefix = ( trimspace(var.existing_db_username_prefix) != "" ? (endswith(trimspace(var.existing_db_username_prefix), ".") ? trimspace(var.existing_db_username_prefix) : "${trimspace(var.existing_db_username_prefix)}.") : "" ) db_user = var.use_existing_database ? ( - local.normalized_prefix != "" ? "${local.normalized_prefix}${local.stage_syncbot_user}" : var.existing_db_user + trimspace(var.existing_db_app_username) != "" ? trimspace(var.existing_db_app_username) : ( + local.normalized_prefix != "" ? "${local.normalized_prefix}${local.stage_sbapp_user}" : var.existing_db_user + ) ) : "syncbot_app" # Non-secret Cloud Run env (see docs/INFRA_CONTRACT.md) diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 26273dc..8f28b9b 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -603,6 +603,7 @@ if [[ -n "$EXISTING_SERVICE_URL" ]]; then DETECTED_EXISTING_USER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_USER")" fi if [[ "$USE_EXISTING" == "true" ]]; then + EXISTING_DB_APP_USERNAME="" EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" EXISTING_DB_USERNAME_PREFIX="$(prompt_line "DB username prefix (optional; e.g. TiDB Cloud abc123; blank = enter full DB user next)" "")" @@ -611,12 +612,13 @@ if [[ "$USE_EXISTING" == "true" ]]; then else EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" fi + EXISTING_DB_APP_USERNAME="$(prompt_line "Override DATABASE_USER (optional; full username e.g. TiDB-prefixed; blank = prefix+sbapp_{stage} or Database user above)" "")" if [[ -z "$EXISTING_HOST" ]]; then echo "Error: Existing DB host is required when using existing database mode." 
>&2 exit 1 fi - if [[ -z "$EXISTING_USER" && -z "$EXISTING_DB_USERNAME_PREFIX" ]]; then - echo "Error: Database user or DB username prefix is required when using existing database mode." >&2 + if [[ -z "$EXISTING_USER" && -z "$EXISTING_DB_USERNAME_PREFIX" && -z "$EXISTING_DB_APP_USERNAME" ]]; then + echo "Error: Database user, DB username prefix, or DATABASE_USER override is required when using existing database mode." >&2 exit 1 fi @@ -762,11 +764,13 @@ if [[ "$USE_EXISTING" == "true" ]]; then VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") VARS+=("-var=existing_db_user=$EXISTING_USER") VARS+=("-var=existing_db_username_prefix=$EXISTING_DB_USERNAME_PREFIX") + VARS+=("-var=existing_db_app_username=$EXISTING_DB_APP_USERNAME") VARS+=("-var=existing_db_create_app_user=$EXISTING_DB_CREATE_APP_USER") VARS+=("-var=existing_db_create_schema=$EXISTING_DB_CREATE_SCHEMA") else VARS+=("-var=use_existing_database=false") VARS+=("-var=existing_db_username_prefix=") + VARS+=("-var=existing_db_app_username=") fi VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index b6fdcf3..5d87d7d 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -45,13 +45,19 @@ variable "existing_db_schema" { variable "existing_db_user" { type = string default = "" - description = "Existing MySQL user (when use_existing_database = true). Ignored when existing_db_username_prefix is set (DATABASE_USER becomes prefix + syncbot_user_{stage})." + description = "Existing MySQL user (when use_existing_database = true). Ignored when existing_db_app_username or existing_db_username_prefix is set." } variable "existing_db_username_prefix" { type = string default = "" - description = "Optional prefix for DATABASE_USER (e.g. TiDB Cloud cluster prefix \"abc123\"). A dot separator is added automatically. When non-empty, DATABASE_USER is {prefix}.syncbot_user_{stage} and existing_db_user is ignored." 
+ description = "Optional prefix for DATABASE_USER (e.g. TiDB Cloud cluster prefix \"abc123\"). A dot separator is added automatically. When non-empty, DATABASE_USER is {prefix}.sbapp_{stage} unless existing_db_app_username is set; existing_db_user is ignored." +} + +variable "existing_db_app_username" { + type = string + default = "" + description = "Optional full DATABASE_USER override when use_existing_database (bypasses prefix + sbapp_{stage} and existing_db_user)." } variable "existing_db_create_app_user" { diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index 26973b7..8c94a77 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -96,7 +96,7 @@ def test_handler_calls_postgresql_setup(cfn_create_event): def test_safe_username_accepts_dotted_prefix(): handler = _fresh_handler() - handler._safe_username("42bvZAUSurKwhxc.syncbot_user_test") + handler._safe_username("42bvZAUSurKwhxc.sbapp_test") def test_safe_ident_rejects_dots(): @@ -116,7 +116,7 @@ def test_handler_username_prefix_with_dot(cfn_create_event): patch.object(handler, "setup_database_postgresql"), ): handler._handler_impl(cfn_create_event, MagicMock()) - assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + assert mock_mysql.call_args[1]["app_username"] == "pre.sbapp_test" assert mock_mysql.call_args[1]["admin_user"] == "pre.admin" @@ -131,7 +131,7 @@ def test_handler_username_prefix_without_dot(cfn_create_event): patch.object(handler, "setup_database_postgresql"), ): handler._handler_impl(cfn_create_event, MagicMock()) - assert mock_mysql.call_args[1]["app_username"] == "pre.syncbot_user_test" + assert mock_mysql.call_args[1]["app_username"] == "pre.sbapp_test" assert mock_mysql.call_args[1]["admin_user"] == "pre.admin" @@ -148,7 +148,23 @@ def test_handler_username_prefix_applied_to_bare_root_admin(cfn_create_event): ): handler._handler_impl(cfn_create_event, MagicMock()) assert mock_mysql.call_args[1]["admin_user"] == "cluster.root" - assert 
mock_mysql.call_args[1]["app_username"] == "cluster.syncbot_user_test" + assert mock_mysql.call_args[1]["app_username"] == "cluster.sbapp_test" + + +def test_handler_app_username_override_bypasses_prefix_and_default(cfn_create_event): + cfn_create_event["ResourceProperties"]["UsernamePrefix"] = "pre" + cfn_create_event["ResourceProperties"]["AppUsername"] = "custom.app_user" + handler = _fresh_handler() + with ( + patch.object(handler, "send"), + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql"), + ): + handler._handler_impl(cfn_create_event, MagicMock()) + assert mock_mysql.call_args[1]["app_username"] == "custom.app_user" + assert mock_mysql.call_args[1]["admin_user"] == "pre.admin" def test_handler_custom_port_passed_to_tcp_and_mysql(cfn_create_event):