From e50b7639ec716d63711d2ca5702bb36813f773e8 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 6 Mar 2026 10:55:06 -0600 Subject: [PATCH 01/45] Rebuild of codebase to work for individual Region installs. NOTE: This commit is squashed because of a false detection of a Slack bot key that needed to be excluded from history. --- .devcontainer/devcontainer.json | 42 + .devcontainer/docker-compose.dev.yml | 42 + .env.example | 86 ++ .github/workflows/sam-pipeline.yml | 74 +- .gitignore | 6 + .pre-commit-config.yaml | 25 + ARCHITECTURE.md | 151 ++++ Dockerfile | 26 + IMPROVEMENTS.md | 467 ++++++++++ LICENSE | 661 ++++++++++++++ README.md | 585 +++++++++++- assets/icon.png | Bin 0 -> 101485 bytes db/init.sql | 139 +++ docker-compose.yml | 43 + poetry.lock | 1236 +++++++++----------------- pyproject.toml | 52 +- samconfig.toml | 28 + slack-manifest.yaml | 61 ++ syncbot/app.py | 227 ++++- syncbot/builders/__init__.py | 48 + syncbot/builders/_common.py | 158 ++++ syncbot/builders/channel_sync.py | 179 ++++ syncbot/builders/home.py | 498 +++++++++++ syncbot/builders/sync.py | 95 ++ syncbot/builders/user_mapping.py | 350 ++++++++ syncbot/constants.py | 147 +++ syncbot/db/__init__.py | 364 ++++++++ syncbot/db/schemas.py | 221 +++++ syncbot/federation/__init__.py | 58 ++ syncbot/federation/api.py | 653 ++++++++++++++ syncbot/federation/core.py | 676 ++++++++++++++ syncbot/handlers/__init__.py | 135 +++ syncbot/handlers/_common.py | 71 ++ syncbot/handlers/channel_sync.py | 847 ++++++++++++++++++ syncbot/handlers/export_import.py | 508 +++++++++++ syncbot/handlers/federation_cmds.py | 363 ++++++++ syncbot/handlers/group_manage.py | 210 +++++ syncbot/handlers/groups.py | 775 ++++++++++++++++ syncbot/handlers/messages.py | 704 +++++++++++++++ syncbot/handlers/sync.py | 401 +++++++++ syncbot/handlers/tokens.py | 136 +++ syncbot/handlers/users.py | 291 ++++++ syncbot/helpers/__init__.py | 148 +++ syncbot/helpers/_cache.py | 38 + syncbot/helpers/core.py | 104 +++ 
syncbot/helpers/encryption.py | 72 ++ syncbot/helpers/export_import.py | 524 +++++++++++ syncbot/helpers/files.py | 279 ++++++ syncbot/helpers/notifications.py | 226 +++++ syncbot/helpers/oauth.py | 62 ++ syncbot/helpers/refresh.py | 83 ++ syncbot/helpers/slack_api.py | 220 +++++ syncbot/helpers/user_matching.py | 628 +++++++++++++ syncbot/helpers/workspace.py | 389 ++++++++ syncbot/logger.py | 252 ++++++ syncbot/requirements.txt | 31 +- syncbot/routing.py | 86 ++ syncbot/{utils => slack}/__init__.py | 0 syncbot/slack/actions.py | 206 +++++ syncbot/slack/blocks.py | 95 ++ syncbot/slack/forms.py | 100 +++ syncbot/{utils => }/slack/orm.py | 145 ++- syncbot/utils/announcements.py | 44 - syncbot/utils/builders.py | 137 --- syncbot/utils/constants.py | 32 - syncbot/utils/db/__init__.py | 163 ---- syncbot/utils/db/schemas.py | 80 -- syncbot/utils/handlers.py | 342 ------- syncbot/utils/helpers.py | 313 ------- syncbot/utils/routing.py | 31 - syncbot/utils/slack/__init__.py | 0 syncbot/utils/slack/actions.py | 12 - syncbot/utils/slack/forms.py | 86 -- template.yaml | 625 +++++++++++-- tests/test_db.py | 105 +++ tests/test_handlers.py | 347 ++++++++ tests/test_helpers.py | 243 +++++ 77 files changed, 15867 insertions(+), 2220 deletions(-) create mode 100644 .devcontainer/devcontainer.json create mode 100644 .devcontainer/docker-compose.dev.yml create mode 100644 .env.example create mode 100644 .pre-commit-config.yaml create mode 100644 ARCHITECTURE.md create mode 100644 Dockerfile create mode 100644 IMPROVEMENTS.md create mode 100644 LICENSE create mode 100644 assets/icon.png create mode 100644 db/init.sql create mode 100644 docker-compose.yml create mode 100644 samconfig.toml create mode 100644 slack-manifest.yaml create mode 100644 syncbot/builders/__init__.py create mode 100644 syncbot/builders/_common.py create mode 100644 syncbot/builders/channel_sync.py create mode 100644 syncbot/builders/home.py create mode 100644 syncbot/builders/sync.py create mode 100644 
syncbot/builders/user_mapping.py create mode 100644 syncbot/constants.py create mode 100644 syncbot/db/__init__.py create mode 100644 syncbot/db/schemas.py create mode 100644 syncbot/federation/__init__.py create mode 100644 syncbot/federation/api.py create mode 100644 syncbot/federation/core.py create mode 100644 syncbot/handlers/__init__.py create mode 100644 syncbot/handlers/_common.py create mode 100644 syncbot/handlers/channel_sync.py create mode 100644 syncbot/handlers/export_import.py create mode 100644 syncbot/handlers/federation_cmds.py create mode 100644 syncbot/handlers/group_manage.py create mode 100644 syncbot/handlers/groups.py create mode 100644 syncbot/handlers/messages.py create mode 100644 syncbot/handlers/sync.py create mode 100644 syncbot/handlers/tokens.py create mode 100644 syncbot/handlers/users.py create mode 100644 syncbot/helpers/__init__.py create mode 100644 syncbot/helpers/_cache.py create mode 100644 syncbot/helpers/core.py create mode 100644 syncbot/helpers/encryption.py create mode 100644 syncbot/helpers/export_import.py create mode 100644 syncbot/helpers/files.py create mode 100644 syncbot/helpers/notifications.py create mode 100644 syncbot/helpers/oauth.py create mode 100644 syncbot/helpers/refresh.py create mode 100644 syncbot/helpers/slack_api.py create mode 100644 syncbot/helpers/user_matching.py create mode 100644 syncbot/helpers/workspace.py create mode 100644 syncbot/logger.py create mode 100644 syncbot/routing.py rename syncbot/{utils => slack}/__init__.py (100%) create mode 100644 syncbot/slack/actions.py create mode 100644 syncbot/slack/blocks.py create mode 100644 syncbot/slack/forms.py rename syncbot/{utils => }/slack/orm.py (76%) delete mode 100644 syncbot/utils/announcements.py delete mode 100644 syncbot/utils/builders.py delete mode 100644 syncbot/utils/constants.py delete mode 100644 syncbot/utils/db/__init__.py delete mode 100644 syncbot/utils/db/schemas.py delete mode 100644 syncbot/utils/handlers.py delete mode 
100644 syncbot/utils/helpers.py delete mode 100644 syncbot/utils/routing.py delete mode 100644 syncbot/utils/slack/__init__.py delete mode 100644 syncbot/utils/slack/actions.py delete mode 100644 syncbot/utils/slack/forms.py create mode 100644 tests/test_db.py create mode 100644 tests/test_handlers.py create mode 100644 tests/test_helpers.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..3cce8bb --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +{ + "name": "SyncBot", + "dockerComposeFile": "docker-compose.dev.yml", + "service": "app", + "workspaceFolder": "/app", + + "features": { + "ghcr.io/devcontainers/features/aws-cli:1": {} + }, + + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "charliermarsh.ruff" + ], + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.testing.pytestEnabled": true, + "python.testing.pytestArgs": ["tests"], + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.formatOnSave": true + } + } + } + }, + + "forwardPorts": [3000, 3306], + + "postCreateCommand": "pip install --no-cache-dir boto3 pytest && echo '✅ Dev container ready'", + + "remoteEnv": { + "PYTHONPATH": "/app/syncbot", + "LOCAL_DEVELOPMENT": "true", + "DATABASE_HOST": "db", + "ADMIN_DATABASE_USER": "root", + "ADMIN_DATABASE_PASSWORD": "rootpass", + "ADMIN_DATABASE_SCHEMA": "syncbot" + } +} diff --git a/.devcontainer/docker-compose.dev.yml b/.devcontainer/docker-compose.dev.yml new file mode 100644 index 0000000..0f6e82d --- /dev/null +++ b/.devcontainer/docker-compose.dev.yml @@ -0,0 +1,42 @@ +services: + db: + image: mysql:8 + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: syncbot + MYSQL_ROOT_HOST: "%" + ports: + - "3306:3306" + volumes: + - syncbot-db:/var/lib/mysql + - ../db/init.sql:/docker-entrypoint-initdb.d/01-init.sql:ro + healthcheck: + test: ["CMD", 
"mysqladmin", "ping", "-h", "localhost"] + interval: 5s + timeout: 5s + retries: 10 + + app: + build: + context: .. + dockerfile: Dockerfile + command: sleep infinity + depends_on: + db: + condition: service_healthy + env_file: + - ../.env + environment: + # Overrides that are always fixed for local dev + LOCAL_DEVELOPMENT: "true" + DATABASE_HOST: db + ADMIN_DATABASE_USER: root + ADMIN_DATABASE_PASSWORD: rootpass + ADMIN_DATABASE_SCHEMA: syncbot + volumes: + - ..:/app:cached + ports: + - "3000:3000" + +volumes: + syncbot-db: diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..fa618e8 --- /dev/null +++ b/.env.example @@ -0,0 +1,86 @@ +# ============================================================================= +# SyncBot Environment Variables +# ============================================================================= +# Copy this file to .env and fill in your values: +# cp .env.example .env +# +# Docker Compose and Dev Containers read .env automatically. +# For native Python development, source it: source .env or export $(cat .env | xargs) + +# ----------------------------------------------------------------------------- +# Database +# ----------------------------------------------------------------------------- +DATABASE_HOST=127.0.0.1 +ADMIN_DATABASE_USER=root +ADMIN_DATABASE_PASSWORD=rootpass +ADMIN_DATABASE_SCHEMA=syncbot + +# DANGER: When set to true, app startup DROPS the database and reinitializes +# from db/init.sql. All data is lost. Only for local/dev reset. +# DANGER_DROP_AND_INIT_DB=false + +# ----------------------------------------------------------------------------- +# Local Development Mode +# ----------------------------------------------------------------------------- +# This lets you run the app without all the Slack credentials. 
+# LOCAL_DEVELOPMENT=true + +# ----------------------------------------------------------------------------- +# Slack +# These are set via SAM template parameters, not .env, during deploy. +# Uncomment if running locally with OAuth flow. +# ----------------------------------------------------------------------------- +# SLACK_BOT_TOKEN=xoxb-your-bot-token +# SLACK_SIGNING_SECRET=your-signing-secret +# ENV_SLACK_CLIENT_ID=your-client-id +# ENV_SLACK_CLIENT_SECRET=your-client-secret +# ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# ENV_SLACK_STATE_S3_BUCKET_NAME=syncbot-state +# ENV_SLACK_INSTALLATION_S3_BUCKET_NAME=syncbot-installations + +# ----------------------------------------------------------------------------- +# Encryption (optional) +# ----------------------------------------------------------------------------- +# Passphrase for Fernet bot-token encryption at rest. +# Use any value except "123" to enable encryption. +# PASSWORD_ENCRYPT_KEY=my-secret-passphrase + +# ----------------------------------------------------------------------------- +# Admin Authorization (optional) +# ----------------------------------------------------------------------------- +# Set to "false" to allow all users to configure syncs (default: true). +# REQUIRE_ADMIN=true + +# ----------------------------------------------------------------------------- +# S3 File Storage (optional) +# ----------------------------------------------------------------------------- +# When S3_IMAGE_BUCKET is set, images are uploaded to S3 and referenced by URL. +# When not set, images and videos are re-uploaded directly to each synced +# Slack channel (no external storage required). 
+# +# AWS_ACCESS_KEY_ID=your-key +# AWS_SECRET_ACCESS_KEY=your-secret +# S3_IMAGE_BUCKET=syncbot-images +# S3_IMAGE_URL=https://syncbot-images.s3.amazonaws.com/ +# +# By default, videos are always posted directly to Slack to avoid large S3 +# storage costs. Set S3_VIDEO_ENABLED=true to store videos in S3 as well. +# S3_VIDEO_ENABLED=false + +# ----------------------------------------------------------------------------- +# Soft-Delete Retention (optional) +# ----------------------------------------------------------------------------- +# Number of days to keep soft-deleted workspace data before permanent purge. +# When a workspace uninstalls the app, its group memberships and syncs are paused. +# If it reinstalls within this window, everything is restored automatically. +# SOFT_DELETE_RETENTION_DAYS=30 + +# ----------------------------------------------------------------------------- +# External Connections (optional, disabled by default) +# ----------------------------------------------------------------------------- +# Set SYNCBOT_FEDERATION_ENABLED=true to activate external connections. +# SYNCBOT_INSTANCE_ID is a unique UUID for this instance (auto-generated if not set). +# SYNCBOT_PUBLIC_URL is the publicly reachable base URL (required for external connections). 
+# SYNCBOT_FEDERATION_ENABLED=false +# SYNCBOT_INSTANCE_ID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +# SYNCBOT_PUBLIC_URL=https://your-syncbot.example.com diff --git a/.github/workflows/sam-pipeline.yml b/.github/workflows/sam-pipeline.yml index b715108..2e83638 100644 --- a/.github/workflows/sam-pipeline.yml +++ b/.github/workflows/sam-pipeline.yml @@ -7,65 +7,103 @@ jobs: sam-build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: '3.11' - uses: aws-actions/setup-sam@v2 with: use-installer: true - - uses: aws-actions/configure-aws-credentials@v3 + - uses: aws-actions/configure-aws-credentials@v4 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: us-east-2 - # sam build - run: sam build --use-container - # Run Unit tests- Specify unit tests here - # Publish artifact (need to publish app?) 
- name: Publish artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: build-artifact path: './.aws-sam/build' sam-deploy-test: - runs-on: "ubuntu-latest" - environment: "test" + runs-on: ubuntu-latest + environment: test needs: sam-build steps: - - uses: aws-actions/configure-aws-credentials@v3 + - uses: aws-actions/configure-aws-credentials@v4 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: us-east-2 - name: Download artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: build-artifact path: './.aws-sam/build' - - run: sam deploy -t .aws-sam/build/template.yaml --no-confirm-changeset --no-fail-on-empty-changeset --stack-name ${{ vars.AWS_STACK_NAME }} --s3-bucket ${{ vars.AWS_S3_BUCKET }} --capabilities CAPABILITY_IAM --region us-east-2 --no-disable-rollback --force-upload --parameter-overrides "SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} Stage=${{ vars.STAGE_NAME }} DatabaseHost=${{ secrets.DATABASE_HOST }} DatabasePassword=${{ secrets.ADMIN_DATABASE_PASSWORD }} PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }} StravaClientID=${{ secrets.STRAVA_CLIENT_ID }} StravaClientSecret=${{ secrets.STRAVA_CLIENT_SECRET }}" + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + + - name: Deploy to test + run: | + sam deploy \ + -t .aws-sam/build/template.yaml \ + --no-confirm-changeset \ + --no-fail-on-empty-changeset \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 \ + --no-disable-rollback \ + --force-upload \ + --parameter-overrides \ + "Stage=${{ vars.STAGE_NAME }} \ + SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ + SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ + DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ + 
PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }}" sam-deploy-prod: - runs-on: "ubuntu-latest" - environment: "prod" + runs-on: ubuntu-latest + environment: prod needs: [sam-build, sam-deploy-test] steps: - - uses: aws-actions/configure-aws-credentials@v2 + - uses: aws-actions/configure-aws-credentials@v4 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} aws-region: us-east-2 - name: Download artifact - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: build-artifact path: './.aws-sam/build' - - - run: sam deploy -t .aws-sam/build/template.yaml --no-confirm-changeset --no-fail-on-empty-changeset --stack-name ${{ vars.AWS_STACK_NAME }} --s3-bucket ${{ vars.AWS_S3_BUCKET }} --capabilities CAPABILITY_IAM --region us-east-2 --no-disable-rollback --force-upload --parameter-overrides "SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} Stage=${{ vars.STAGE_NAME }} DatabaseHost=${{ secrets.DATABASE_HOST }} DatabasePassword=${{ secrets.ADMIN_DATABASE_PASSWORD }} PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }} StravaClientID=${{ secrets.STRAVA_CLIENT_ID }} StravaClientSecret=${{ secrets.STRAVA_CLIENT_SECRET }}" + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + + - name: Deploy to prod + run: | + sam deploy \ + -t .aws-sam/build/template.yaml \ + --no-confirm-changeset \ + --no-fail-on-empty-changeset \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 \ + --no-disable-rollback \ + --force-upload \ + --parameter-overrides \ + "Stage=${{ vars.STAGE_NAME }} \ + SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ + SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ + DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ + PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }}" diff --git 
a/.gitignore b/.gitignore index ff501f8..9aaf78d 100644 --- a/.gitignore +++ b/.gitignore @@ -156,9 +156,15 @@ env.json .aws-sam/ data/cache/ +# Local OAuth file stores (dev only) +syncbot/.oauth-data/ + # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ + +# Cursor +.cursor/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..83dfa57 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,25 @@ +# Pre-commit hooks for SyncBot +# +# Install: pip install pre-commit && pre-commit install +# Run all: pre-commit run --all-files +# +# See https://pre-commit.com for more information. + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + args: ["--maxkb=500"] + - id: check-merge-conflict + - id: detect-private-key + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.8.6 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + - id: ruff-format diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 0000000..1689921 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,151 @@ +# Architecture + +## Module Overview + +SyncBot is organized into six top-level packages inside `syncbot/`: + +| Package | Responsibility | +|---------|----------------| +| `handlers/` | Slack event and action handlers (messages, groups, channel sync, users, tokens, federation UI, backup/restore, data migration) | +| `builders/` | Slack UI construction — Home tab, modals, and forms | +| `helpers/` | Business logic, Slack API wrappers, encryption, file handling, user 
matching, caching, export/import (backup dump/restore, migration build/import) | +| `federation/` | Cross-instance sync — Ed25519 signing/verification, HTTP client, API endpoint handlers, pair payload (optional team_id/workspace_name for Instance A detection) (opt-in) | +| `db/` | SQLAlchemy engine, session management, `DbManager` CRUD helper, ORM models | +| `slack/` | Block Kit abstractions — action/callback ID constants, form definitions, ORM elements | + +Top-level modules: `app.py` (entry point), `routing.py` (event dispatcher), `constants.py` (env-var names), `logger.py` (structured logging + metrics). + +## Message Sync Flow + +When a user posts a message in a synced channel, SyncBot replicates it to every other channel in the Sync group: + +```mermaid +sequenceDiagram + participant U as User (Workspace A) + participant S as Slack API + participant AG as API Gateway + participant L as Lambda (SyncBot) + participant DB as RDS MySQL + participant S3 as S3 (Images) + participant SB as Slack API (Workspace B) + + U->>S: Posts message in #general + S->>AG: POST /slack/events + AG->>L: Proxy event + L->>L: Assign correlation ID + L->>L: Acknowledge (ack) + L->>DB: Look up sync group for channel + DB-->>L: SyncChannel + Workspace records + + alt Message has images (streamed with size cap) + L->>S: Download image via URL + S-->>L: Image bytes (streaming) + L->>S3: Upload (with HEIC→PNG conversion) + end + + L->>S: users.info (resolve sender) + S-->>L: display_name, avatar URL + + loop For each target channel + L->>L: Re-map @mentions (cached user matching) + L->>SB: chat.postMessage (as sender) + SB-->>L: ts (timestamp) + L->>DB: Save PostMeta record + end + + L->>L: Emit metrics (messages_synced) + L-->>AG: 200 OK + AG-->>S: 200 OK +``` + +The same pattern applies to edits (`chat.update`), deletes (`chat.delete`), thread replies (with `thread_ts`), and reactions (threaded reply with emoji attribution). 
+ +## AWS Infrastructure + +```mermaid +flowchart TB + subgraph Slack["Slack Platform"] + WA["Workspace A"] + WB["Workspace B"] + end + + subgraph AWS["AWS Account"] + subgraph APIGW["API Gateway"] + EP["/slack/events
/slack/install
/slack/oauth_redirect
/api/federation/*"] + end + + subgraph Lambda["Lambda Function"] + APP["app.py → routing.py"] + HAND["handlers/"] + BUILD["builders/"] + HELP["helpers/"] + FED["federation/"] + end + + subgraph Storage["S3 Buckets"] + S1["OAuth State
(1-day TTL)"] + S2["Installations
(versioned)"] + S3["Images
(90-day TTL, public read)"] + end + + subgraph Database["RDS MySQL"] + T1["workspaces"] + T2["workspace_groups"] + T2a["workspace_group_members"] + T3["syncs"] + T4["sync_channels"] + T5["post_meta"] + T6["user_directory"] + T7["user_mappings"] + T8["federated_workspaces"] + end + + subgraph Monitoring["CloudWatch"] + CW["Alarms:
Lambda Errors
Throttles
Duration
API 5xx"] + LG["Logs:
Structured JSON
Correlation IDs
Metrics"] + end + + EB["EventBridge
(keep-warm every 5 min)"] + end + + WA & WB <-->|Events & API calls| EP + EP --> APP + APP --> HAND + HAND --> HELP + HAND --> BUILD + HELP --> FED + HELP --> S1 & S2 + HELP --> S3 + HELP -->|SQLAlchemy
QueuePool + retry| Database + EB -->|ScheduleV2| Lambda + Lambda -.->|logs & metrics| Monitoring +``` + +All infrastructure is defined in `template.yaml` (AWS SAM). Dashed lines indicate resources that are conditionally created — when `Existing*` parameters are set, those resources are skipped. + +## Security & Hardening + +| Layer | Protection | +|-------|------------| +| **Input** | File count caps (20), mention caps (50), federation user caps (5,000), federation body size limit (1 MB), `_sanitize_text` on form input | +| **Downloads** | Streaming with 30s timeout, 100 MB size cap, 8 KB chunks — prevents unbounded memory/disk usage | +| **Encryption** | Bot tokens encrypted at rest with Fernet (PBKDF2-derived key, cached to avoid repeated 600K iterations) | +| **Database** | `pool_pre_ping=True` for stale connection detection, retry decorator on all operations, `dispose()` only after all retries exhausted | +| **Slack API** | `slack_retry` decorator with exponential backoff, `Retry-After` header support, user profile caching | +| **Network** | RDS SSL/TLS enforcement, API Gateway throttling (20 burst / 10 sustained), federation HMAC-SHA256 signing with 5-minute replay window | +| **Authorization** | Admin/owner checks on all configuration actions, configurable via `REQUIRE_ADMIN` | + +## Performance & Cost (Home and User Mapping Refresh) + +To keep RDS and Slack API usage low when admins use the **Refresh** button on the Home tab or User Mapping screen: + +- **Content hash** — A minimal set of DB queries computes a hash of the data that drives the view (groups, members, syncs, pending invites; for User Mapping, mapping ids and methods). If the hash matches the last full refresh, the app skips expensive work. +- **Cached built blocks** — After a full refresh, the built Block Kit payload is cached (keyed by workspace and user). When the hash matches, the app re-publishes that cached view with one `views.publish` instead of re-running all DB and Slack calls. 
+- **60-second cooldown** — If the user clicks Refresh again within 60 seconds and the hash is unchanged, the app re-publishes the cached view with a message: "No new data. Wait __ seconds before refreshing again." (seconds remaining from the last refresh). This avoids redundant full refreshes from repeated clicks. +- **Request-scoped caching** — Within a single Lambda invocation, `get_workspace_by_id` and `get_admin_ids` use the request `context` as a cache so repeated lookups for the same workspace or admin list do not hit the DB or Slack again. The same context is passed through all "push refresh" paths (e.g. when one workspace publishes a channel and other workspaces' Home tabs are updated), so those updates share the cache and stay lightweight. + +## Backup, Restore, and Data Migration + +- **Full-instance backup** — All tables are dumped as plain JSON (no compression). The payload includes `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `PASSWORD_ENCRYPT_KEY`), and `hmac` (HMAC-SHA256 over canonical JSON). Restore inserts rows in FK order; it is intended for an empty or fresh database (e.g. after an AWS rebuild). On HMAC or encryption-key mismatch, the UI warns but allows proceeding. After restore, Home tab caches (`home_tab_hash`, `home_tab_blocks`) are invalidated for all restored workspaces. +- **Data migration (workspace-scoped)** — Export produces a JSON file with syncs, sync channels, post meta, user directory, and user mappings keyed by stable identifiers (team_id, sync title, channel_id). The export can include `source_instance` (webhook_url, instance_id, public_key, one-time connection code) so import on the new instance can establish the federation connection and then import in one step. The payload is signed with the instance Ed25519 key; import verifies the signature and warns (but does not block) on mismatch. 
Import uses replace mode: existing SyncChannels and PostMeta for that workspace in the federated group are removed, then data from the file is created. User mappings are imported where both source and target workspace exist on the new instance. After import, Home tab caches for that workspace are invalidated. +- **Instance A detection** — When instance B connects to A via federation, B can send optional `team_id` and `workspace_name` in the pair request. A stores them on the `federated_workspaces` row (`primary_team_id`, `primary_workspace_name`) and, if a local workspace with that `team_id` exists, soft-deletes it so the only representation of that workspace on A is the federated connection. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..84faf21 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.11-slim + +WORKDIR /app + +# Install system dependencies for cryptography and pillow-heif +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + libffi-dev \ + default-libmysqlclient-dev \ + libheif-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install Python dependencies +COPY syncbot/requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +RUN pip install --no-cache-dir boto3 + +# Copy application code +COPY syncbot/ ./syncbot/ + +WORKDIR /app/syncbot + +EXPOSE 3000 + +CMD ["python", "app.py"] diff --git a/IMPROVEMENTS.md b/IMPROVEMENTS.md new file mode 100644 index 0000000..a7f39d2 --- /dev/null +++ b/IMPROVEMENTS.md @@ -0,0 +1,467 @@ +# SyncBot Improvements Summary + +This document outlines the improvements made to the SyncBot application and additional recommendations for future enhancements. + +## ✅ Completed Improvements + +### 1. 
Database Management Fixes +- **Added `@staticmethod` decorators** to all `DbManager` methods for proper static method usage +- **Fixed session management** - All database methods now properly close sessions in finally blocks +- **Improved error handling** in database operations + +### 2. Code Quality Improvements +- **Removed duplicate constant definitions** in `constants.py` (SLACK_STATE_S3_BUCKET_NAME, SLACK_INSTALLATION_S3_BUCKET_NAME, etc. were defined twice) +- **Fixed type hints**: + - `get_request_type()` now correctly returns `tuple[str, str]` instead of `tuple[str]` + - `apply_mentioned_users()` now correctly returns `str` instead of `List[Dict]` + +### 3. Error Handling Enhancements +- **Replaced bare `except Exception:` clauses** with proper error logging: + - `handle_remove_sync()` now logs warnings when failing to leave channels + - `handle_join_sync_submission()` now logs errors with context + - Added null check for `sync_channel_record` before use +- **Improved exception handling** in `announcements.py`: + - Replaced print statements with proper logging + - Better handling of rate limiting errors + - More descriptive error messages + +### 4. Logging Improvements +- **Replaced all `print()` statements** with proper logging: + - `orm.py`: Added logger and replaced print statements with `logger.error()` and `logger.debug()` + - `announcements.py`: Replaced print statements with appropriate log levels (info, warning, error) + - `handlers.py`: Removed debug print statement +- **Added logging module** where needed + +### 5. 
Database Connection Pooling +- **Replaced `pool.NullPool` with `pool.QueuePool`** (`pool_size=3`, `max_overflow=2`, `pool_recycle=3600`) for connection reuse across warm Lambda invocations +- **Added `pool_pre_ping=True`** to detect and replace stale connections transparently +- **Added `_with_retry` decorator** on all `DbManager` methods to automatically retry on transient `OperationalError` (up to 2 retries with engine disposal between attempts) +- **Simplified `close_session()`** to return connections to the pool instead of disposing the entire engine + +### 6. Rate Limiting Handling +- **Created `slack_retry` decorator** with exponential backoff for all Slack API calls: + - Honors `Retry-After` headers on HTTP 429 responses + - Retries on transient 5xx server errors + - Configurable max retries (default 3) with exponential backoff (capped at 30s) +- **Refactored `parse_mentioned_users()`** to use individual `users.info()` calls instead of the heavy `users.list()` endpoint that is easily rate-limited +- **Refactored `apply_mentioned_users()`** to use `users.lookupByEmail()` for individual lookups instead of `users.list()` +- **Added user profile caching** (`_get_user_profile()`) with a 5-minute TTL to avoid redundant API calls for the same user +- **Applied `@slack_retry`** to `post_message()`, `delete_message()`, `_users_info()`, and `_lookup_user_by_email()` + +### 7. Error Recovery +- **Added error isolation in sync loops** - a failure syncing to one channel no longer prevents syncing to the remaining channels: + - `_handle_new_post()`: Individual channel failures are caught and logged; remaining channels continue + - `_handle_thread_reply()`: Same per-channel error isolation + - `_handle_message_edit()`: Same per-channel error isolation + - `_handle_message_delete()`: Same per-channel error isolation +- **Guard against empty post lists** - `DbManager.create_records()` is only called when there are records to persist + +### 8. 
Type Safety +- **Added `EventContext` TypedDict** for the parsed message event context, replacing untyped `dict` +- **Updated all sub-handler signatures** (`_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete`) to use `EventContext` +- **Added comprehensive type hints** across the codebase: + - `helpers.py`: `safe_get()`, `get_user_info()`, `post_message()`, `delete_message()`, `update_modal()`, `parse_mentioned_users()`, `apply_mentioned_users()` and all new functions + - `handlers.py`: `_build_photo_context()`, `_get_team_name()` return types + - `schemas.py`: `GetDBClass` mixin methods (`get_id`, `get`, `to_json`, `__repr__`) +- **Improved exception handling in `safe_get()`** to also catch `AttributeError` and `IndexError` + +### 9. Testing +- **Created unit test suite** with 40 tests across 3 modules: + - `tests/test_helpers.py`: `safe_get()` (9 tests), encryption roundtrip/failure/wrong-key (5 tests), TTL cache (4 tests), `get_request_type()` (4 tests), `slack_retry` decorator (3 tests) + - `tests/test_db.py`: `_with_retry` decorator (4 tests), engine QueuePool verification (1 test) + - `tests/test_handlers.py`: `_parse_event_fields()` (4 tests), `EventContext` TypedDict (1 test), `_sanitize_text()` (5 tests) +- **Added pytest configuration** in `pyproject.toml` with `testpaths` and `pythonpath` + +### 10. Code Organization (Medium Priority - Previously Completed) +- **Refactored `respond_to_message_event()`** (170+ lines) into a thin dispatcher and focused sub-handlers: `_parse_event_fields`, `_build_photo_context`, `_get_team_name`, `_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete` + +### 11. 
Configuration Management (Medium Priority - Previously Completed) +- **Added `validate_config()`** startup validation for required environment variables +- **Fails fast in production** (raises `EnvironmentError`); warns in local development +- **Separate required lists** for always-required and production-only variables + +### 12. Database Schema (Medium Priority - Previously Completed) +- **Added soft deletes** for `sync_channels` via `deleted_at` column with index +- **Created SQL migration scripts**: `migrate_001_security.sql`, `migrate_003_soft_deletes.sql` +- **Created Python migration script**: `migrate_002_encrypt_tokens.py` for encrypting existing tokens +- **Updated all queries** to filter out soft-deleted records + +### 13. Security (Medium Priority - Previously Completed) +- **Bot token encryption** at rest using Fernet (AES-128-CBC + HMAC-SHA256) +- **Fail-closed decryption** - refuses to use tokens that fail decryption +- **Input sanitization** via `_sanitize_text()` on user-submitted form data +- **RDS SSL/TLS enforcement** (server-side parameter group + client-side connect_args) +- **API Gateway throttling** (20 burst / 10 sustained requests per second) + +### 14. Performance (Medium Priority - Previously Completed) +- **In-process TTL cache** for `get_sync_list()` (60s TTL) and user info lookups (300s TTL) +- **Hoisted `get_user_info()` calls** outside loops where possible +- **Connection pooling** reuses DB connections across invocations in warm Lambda containers + +### 15. Infrastructure as Code +- **AWS SAM template** (`template.yaml`) defining complete VPC, RDS, S3, Lambda, API Gateway stack +- **Free-tier optimized** (128 MB Lambda, db.t3.micro RDS, gp2 storage, no NAT Gateway) +- **CI/CD pipeline** (`.github/workflows/sam-pipeline.yml`) for automated build/deploy +- **SAM config** (`samconfig.toml`) for staging and production environments + +### 16. 
Documentation (Low Priority - Completed) +- **Added module-level docstrings** to all Python modules across all packages +- **Added function-level docstrings** to all public functions across the codebase (encryption helpers, cache functions, Slack API wrappers, DB helpers, OAuth flow, photo upload, mention parsing, modal updates, request dispatch) +- **Added inline docstrings** to routing table dicts and action ID constants +- **Documented API endpoints** in the README (HTTP routes, subscribed events) +- **Documented deployment process** in the README (first-time deploy, subsequent deploys, CI/CD, migrations, shared infrastructure) + +### 17. Monitoring & Observability (Low Priority - Completed) +- **Added structured JSON logging** via `StructuredFormatter` — every log entry is a single JSON object with `timestamp`, `level`, `correlation_id`, `module`, `function`, `message`, and optional extra fields +- **Added correlation IDs** — a unique 12-character ID is assigned at the start of each incoming Slack request (`set_correlation_id()`) and automatically included in every log line during that request +- **Added metrics emission** via `emit_metric()` — structured log entries for key operational metrics: + - `request_handled` (with `duration_ms`, `request_type`, `request_id`) + - `request_error` (with `request_type`, `request_id`) + - `messages_synced` (with `sync_type`: `new_post`, `thread_reply`, `message_edit`, `message_delete`) + - `sync_failures` (with `sync_type`) +- **Added CloudWatch Alarms** in `template.yaml` (within free-tier's 10-alarm limit): + - `LambdaErrorAlarm` — fires on 3+ errors in 5 minutes + - `LambdaThrottleAlarm` — fires on any throttling + - `LambdaDurationAlarm` — fires when average duration exceeds 10 seconds + - `ApiGateway5xxAlarm` — fires on 5+ server errors in 5 minutes +- **X-Ray distributed tracing** was already enabled (`Tracing: Active` in SAM template) + +### 18. 
Code Style (Low Priority - Completed) +- **Configured `ruff`** as the project linter and formatter (added `[tool.ruff]` section to `pyproject.toml` with rules for pycodestyle, pyflakes, isort, pyupgrade, flake8-bugbear, flake8-simplify, flake8-logging) +- **Ran `ruff format`** across the entire codebase (all Python files in `syncbot/` and `tests/`) +- **Ran `ruff check --fix`** to auto-fix 123 issues (import sorting, deprecated typing imports, style modernization) +- **Manually fixed remaining issues**: mutable default argument (`orm.py`), `return` inside `finally` (`db/__init__.py`), `raise ... from None` for exception chaining (`helpers.py`), ternary ordering (`handlers.py`) +- **Created `.pre-commit-config.yaml`** with hooks for: + - `trailing-whitespace`, `end-of-file-fixer`, `check-yaml`, `check-added-large-files`, `check-merge-conflict`, `detect-private-key` + - `ruff` lint (with `--fix`) + - `ruff-format` + +### 19. Architecture Diagrams (Low Priority - Completed) +- **Added message sync flow sequence diagram** (Mermaid) to README showing the full request path from user message through API Gateway, Lambda, DB lookup, image upload, mention re-mapping, cross-workspace posting, and metric emission +- **Added AWS infrastructure diagram** (Mermaid) to README showing the relationships between API Gateway, Lambda, S3 buckets, RDS, EventBridge keep-warm, and CloudWatch monitoring + +### 20. Admin Authorization and Security Hardening (Completed) +- **Added admin/owner authorization** — only workspace admins and owners can run `/config-syncbot` and all related configuration actions (create sync, join sync, remove sync) + - `is_user_authorized(client, user_id)` checks `is_admin` / `is_owner` from the Slack `users.info` API, with caching + - `get_user_id_from_body(body)` extracts the user ID from any Slack request type (commands, actions, views) + - Unauthorized users receive an ephemeral message: ":lock: Only workspace admins and owners can configure SyncBot." 
+- **Defense-in-depth** — authorization checks are enforced at both the entry points (`build_config_form`, `build_join_sync_form`, `build_new_sync_form`) and the mutation handlers (`handle_remove_sync`, `handle_join_sync_submission`, `handle_new_sync_submission`) +- **Configurable via `REQUIRE_ADMIN` env var** (default `"true"`) — set to `"false"` to allow all users (for small teams) +- **Removed `/send-syncbot-announcement` command** — the broadcast command could be triggered by any admin in any connected workspace, affecting all workspaces; removed entirely as a security risk +- **Fixed input validation in `handle_remove_sync`** — `int()` conversion now wrapped in try/except to prevent crashes on malformed payloads +- **Fixed join-sync ordering in `handle_join_sync_submission`** — `conversations_join` now runs before `DbManager.create_record` so the DB record isn't created if the bot can't actually join the channel + +### 21. Cross-Workspace User Matching (Completed) +- **Persistent user matching pipeline** — @mentions in synced messages are resolved to the correct user in the target workspace using a multi-step algorithm: email lookup → name-based directory matching → bracketed fallback +- **New database tables**: + - `user_directory` — cached copy of each workspace's user profiles (slack_user_id, email, real_name, display_name, normalized_name), refreshed every 24h + - `user_mappings` — cross-workspace match results with TTL-based freshness (email: 30d, name: 14d, manual: never expires, none: 90d) +- **Name normalization** (`_normalize_name`) — trims trailing title/qualifier from display names (e.g., "Johnny B (Title)" → "Johnny B") while preserving original casing and spacing +- **Reactive matching via `team_join` event** — when a new user joins a connected workspace, their profile is added to the directory and all unmatched mappings targeting that workspace are re-checked automatically +- **Admin UI in `/config-syncbot`** — "User Matching" button opens a 
child modal showing: + - Stats: "X matched, Y unmatched" + - Refresh button to re-run auto-matching across all linked workspaces + - Unmatched users with native Slack user-picker dropdowns for manual matching (saved as `match_method='manual'`) + - Matched users with "Unlink" buttons to remove mappings +- **Fallback display** — unmatched mentions render as `[Display Name]` in square brackets instead of broken `@mentions` +- **Migration script** — `db/migrate_004_user_matching.sql` for existing deployments + +### 22. Bot Message Syncing (Completed) +- **Selective bot filtering** — only messages from SyncBot itself are ignored (to prevent infinite loops); messages from all other bots are synced normally +- **Bot identity detection** (`get_own_bot_id`) — resolves SyncBot's `bot_id` using `context` or `auth.test`, with caching +- **Bot attribution** (`get_bot_info_from_event`) — extracts `username` and `icons` from bot message events so synced bot messages preserve the original bot's name and avatar +- **Unit tests** for `_is_own_bot_message` (own bot, other bots, user messages, message_changed events, auth.test fallback) and `get_bot_info_from_event` + +### 23. Simplified Sync Creation (Completed) +- **One-step sync creation** — replaced the two-step flow (create sync title → join channel) with a single channel picker modal +- **`ConversationsSelectElement`** — new Block Kit element that shows both public and private channels (with `exclude_bot_users: true`) +- **Auto-naming** — the sync is named after the selected channel (resolved via `conversations.info`) +- **Combined operation** — on submit, the handler joins the channel, creates the `Sync` record, creates the `SyncChannel` link, and posts a welcome message in one step +- **Private channel support** — the "Join existing Sync" channel picker also upgraded to `ConversationsSelectElement` so private channels are now selectable + +### 24. 
Workspace Pairing with Directed Trust Codes (Completed) +- **Directed workspace pairing** — the Workspace Pairing screen lists every workspace that has SyncBot installed, with its pairing status (Paired, Pending, or Not paired) +- **Pairing flow**: Admin A sees Workspace B listed as "Not paired" → clicks "Generate Code" → a code locked to Workspace B is created → Admin A shares the code out-of-band → Admin B enters the code → pairing is activated bidirectionally +- **Locked codes** — pairing codes are generated for a specific target workspace; if a different workspace tries to redeem the code, it is rejected +- **New database table** — `workspace_pairings` with `initiator_workspace_id`, `partner_workspace_id`, `invite_code`, `status` (`pending`/`active`), `created_at`, `paired_at` +- **Code validation** — codes are 7-character alphanumeric with format `XXX-XXXX`; pending codes expire after 24 hours; self-pairing, wrong-workspace, and duplicate pairing are all rejected +- **Pairing UI in `/config-syncbot`** — "Workspace Pairing" button opens a modal showing: + - All installed workspaces with status: Paired (with Remove button), Pending (with code displayed and Cancel button), or Not paired (with Generate Code button) + - "Enter Pairing Code" button at the top for the receiving side +- **Cascading unpair** — removing a pairing soft-deletes all `SyncChannel` records shared between the two workspaces and has the bot leave those channels +- **Migration script** — `db/migrate_005_workspace_pairings.sql` for existing deployments + +### 25. 
Config Screen Redesign — Channel Sync & User Matching Overhaul (Completed) +- **Three-button config screen** — replaced the four-button layout (Join existing Sync, Create new Sync, User Matching, Workspace Pairing) with three focused buttons: **Workspace Pairing**, **User Matching**, **Channel Sync** +- **1-to-1 Channel Sync (publish/subscribe model)**: + - A workspace "publishes" one of its channels to a specific paired workspace, making it available for syncing + - The paired workspace "subscribes" by selecting one of their own channels to receive messages + - Each publish is scoped to exactly one pairing — publishing to workspace B and workspace C are separate operations + - Channel Sync modal shows: published channels (with Unpublish buttons), available channels from partner (with Subscribe buttons), and a Publish Channel button + - Welcome messages are posted in both channels when a subscription is established + - Unpublishing cleans up both sides (soft-deletes SyncChannels, bot leaves channels) +- **Database changes** — added `pairing_id` column to `syncs` table (FK to `workspace_pairings`, `ON DELETE CASCADE`), removed UNIQUE constraint on `syncs.title` (same channel can be published to multiple pairings) +- **Workspace picker pattern** — both Channel Sync and User Matching now show a workspace picker modal when multiple pairings exist; auto-selects when only one pairing is active +- **User Matching improvements**: + - **Auto-sync on pairing activation** — when a pairing code is accepted, both workspaces' user directories are refreshed and auto-matching runs immediately in both directions + - **Scoped to pairing** — user matching is now filtered to the selected paired workspace instead of showing all linked workspaces at once + - **Filtered unmatchable users** — users with no possible candidate in the target workspace (by normalized display name or email) are hidden from the unmatched list + - **Override dropdowns for matched users** — matched users now show 
a `UsersSelectElement` pre-populated with the current match, allowing direct reassignment without unlinking first +- **New action constants** — ~12 new Block Kit action/callback IDs for channel sync flows, workspace pickers, publish/subscribe, and user matching workspace selection +- **New form templates** — `WORKSPACE_PICKER_FORM`, `PUBLISH_CHANNEL_FORM`, `SUBSCRIBE_CHANNEL_FORM` +- **Prefix-match routing** — added entries for `CONFIG_UNPUBLISH_CHANNEL` and `CONFIG_SUBSCRIBE_CHANNEL` (suffix contains sync/channel IDs) +- **ORM fix** — `update_modal` now supports `submit_button_text="None"` to render modals without a submit button (consistent with `post_modal`) + +### 26. Docker Local Development (Completed) +- **Dev Container support** — added `.devcontainer/devcontainer.json` and `.devcontainer/docker-compose.dev.yml` for full in-editor development inside a Docker container (Cursor / VS Code) + - Python, Pylance, and Ruff extensions pre-configured with format-on-save + - `PYTHONPATH` and database env vars set automatically + - Ports 3000 (app) and 3306 (MySQL) forwarded to host + - AWS CLI feature included for SAM operations + - `pytest` and `boto3` installed on container creation +- **Docker Compose** — added `Dockerfile` and `docker-compose.yml` for standalone container-based development without the Dev Container extension + - MySQL 8 with automatic schema initialization via `init.sql` mount + - App code mounted as a volume for live editing without rebuilds + - Named volume for database persistence across restarts +- **README updated** with three local development options: Dev Container (recommended), Docker Compose, and native Python + +### 27. 
App Home Tab Migration (Completed) +- **Replaced `/config-syncbot` slash command** with a persistent **App Home tab** — all configuration is now managed through the Home tab instead of slash commands and nested modals +- **Inline content** — workspace pairings and channel syncs are rendered directly on the Home tab instead of requiring modal navigation +- **Per-pairing sections** — each paired workspace shows its own section with a "Manage User Matching" button and channel sync controls (publish/unpublish/subscribe) +- **Simplified modal flow** — sub-screens (enter pairing code, publish channel, subscribe channel, user matching) now open as standalone modals (`views.open`) instead of stacked modals (`views.push`) +- **Auto-refresh** — all mutations (generate code, cancel, remove pairing, publish/unpublish/subscribe channel) automatically re-publish the Home tab +- **Manifest updated** — added `app_home_opened` to bot events, removed `slash_commands` section and `commands` OAuth scope +- **Non-admin users** see a locked message on the Home tab instead of an error + +### 28. 
Uninstall Soft-Delete & Reinstall Recovery (Completed) +- **Soft-delete on uninstall** — when a workspace uninstalls SyncBot, its record, pairings, and sync channels are soft-deleted (`deleted_at` timestamp) rather than hard-deleted +- **Automatic reinstall recovery** — if the workspace reinstalls within the retention period, all pairings and sync channels are automatically restored +- **Lifecycle notifications** — consistent notification model using channel messages and admin DMs: + - **Started** — new pairing activated: admin DMs in both workspaces + - **Paused** — workspace uninstalls: admin DMs + channel messages in partner workspace + - **Resumed** — workspace reinstalls: admin DMs + channel messages in partner workspace + - **Stopped** — manual removal: admin DMs + channel messages in partner workspace + - **Purged** — auto-cleanup after retention period: admin DMs to partner workspace +- **Paused indicator** — Home tab and pairing form show `:double_vertical_bar: Paused (uninstalled)` for soft-deleted partner workspaces with no action buttons +- **Configurable retention** — `SOFT_DELETE_RETENTION_DAYS` env var (default 30 days) controls how long soft-deleted data is kept before permanent purge +- **Lazy daily purge** — stale soft-deleted workspaces are hard-deleted via `ON DELETE CASCADE` during the first `app_home_opened` event each day +- **Manifest updated** — added `tokens_revoked` to bot events, `im:write` to OAuth scopes +- **Migration** — `db/migrate_007_uninstall_soft_delete.sql` adds `deleted_at` to `workspaces` and `workspace_pairings` + +### 29. 
External Connections — Cross-Instance Federation (Completed) +- **Cross-instance sync** — independent SyncBot deployments (e.g., on separate AWS accounts, GCP, or Cloudflare) can now connect and sync messages, edits, deletes, reactions, and user matching across instances +- **Connection pairing flow** — admin generates a connection code on one instance, shares it out-of-band, and the other admin enters it to establish a secure connection + - Codes encode the instance's public URL and a unique instance ID in a base64 payload + - On acceptance, both sides exchange a shared secret and store a `federated_workspaces` record +- **HMAC-SHA256 request authentication** — all inter-instance webhook calls (except the initial pairing handshake and health checks) are signed using the shared secret, with replay protection via 5-minute timestamp validation +- **Federation API endpoints** — seven new HTTP endpoints for cross-instance communication: + - `POST /api/federation/pair` — accept an incoming connection request + - `POST /api/federation/message` — receive forwarded messages (new posts and thread replies) + - `POST /api/federation/message/edit` — receive message edits + - `POST /api/federation/message/delete` — receive message deletions + - `POST /api/federation/message/react` — receive reaction add/remove + - `POST /api/federation/users` — exchange user directory for mention matching + - `GET /api/federation/ping` — health check / connectivity test +- **Transparent message forwarding** — the core message handlers (`_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete`) detect whether a sync target is local or remote and dispatch accordingly — local channels are posted to directly, remote channels are forwarded via the federation webhook +- **User directory exchange** — when a connection is established, both instances exchange their user directories so @mention resolution works across instances +- **Image handling** — images use 
existing S3 URLs which are publicly accessible; the receiving instance uses them directly in Slack blocks +- **Retry with exponential backoff** — all outgoing federation HTTP calls retry up to 3 times with 1s/2s/4s backoff on transient failures (5xx, timeouts, connection errors) +- **Home tab UI** — "External Connections" section on the Home tab with "Generate Connection Code" and "Enter Connection Code" buttons, active connection display with status and remove button, and pending code display with cancel button +- **Connection label prompt** — generating a connection code prompts for a friendly name (e.g. "East Coast SyncBot") which is displayed on the Home tab and used as the remote workspace's display name +- **Code delivery via DM** — both internal pairing codes and external connection codes are sent as a DM to the admin for easy copy/paste (Slack Block Kit does not support clipboard buttons) +- **Opt-in feature flag** — external connections are disabled by default; set `SYNCBOT_FEDERATION_ENABLED=true` to enable. All UI, handlers, and API endpoints are gated behind this flag +- **New database table** — `federated_workspaces` (instance_id, webhook_url, public_key, status, name) +- **Schema change** — `federated_workspace_id` added to group members (NULL = local workspace, non-NULL = remote) +- **Environment variables** — `SYNCBOT_FEDERATION_ENABLED` (opt-in flag, default `false`), `SYNCBOT_INSTANCE_ID` (auto-generated UUID), `SYNCBOT_PUBLIC_URL` (required when enabled) +- **Federation package** — `syncbot/federation/core.py` (signing, HTTP client, payload builders), `syncbot/federation/api.py` (API endpoint handlers) +- **Migration** — `db/migrate_009_federated_workspaces.sql` + +### 30. 
Reaction Syncing (Completed) +- **Threaded reaction messages** — emoji reactions (`reaction_added` / `reaction_removed`) are synced to all linked channels as threaded replies on the corresponding message +- **Bidirectional** — reactions work in both directions across workspaces +- **User attribution** — reaction messages display the reacting user's display name and workspace +- **Permalink reference** — each reaction message includes a link to the original message +- **PostMeta lookup** — uses the existing `PostMeta` table to resolve source timestamps to target message timestamps for accurate threading +- **File message timestamp extraction** — `_extract_file_message_ts` uses a retry loop on `files.info` (up to 4 attempts) to reliably capture the message timestamp for files uploaded via `files_upload_v2`, ensuring reactions work on image and video messages + +### 31. GIF Syncing (Completed) +- **Slack GIF picker support** — GIFs sent via Slack's built-in `/giphy` picker or GIPHY integration are detected and synced +- **Nested block parsing** — `_build_file_context` extracts `image_url` from nested `image` blocks within `attachments`, which is how Slack structures GIF picker messages +- **Direct ImageBlock posting** — GIFs are always posted as `ImageBlock` elements via `chat.postMessage` using their public URLs, ensuring a proper message `ts` is captured for `PostMeta` (enabling reactions on GIFs) +- **No S3 required** — GIF URLs are already publicly accessible; no download or S3 upload needed + +### 32. 
Video & Image Direct Upload (Completed) +- **S3 is now optional** — images and videos can be synced without S3 by using Slack's `files_upload_v2` directly +- **`S3_IMAGE_BUCKET` defaults to empty** — when not set, all media is uploaded directly to target channels +- **`S3_VIDEO_ENABLED` env var** — when `true` and S3 is configured, videos are also stored in S3; when `false` (default), videos always use direct upload regardless of S3 configuration +- **User attribution** — direct uploads include "Shared by User (Workspace)" in the `initial_comment` +- **Fallback text** — `post_message` supports a `fallback_text` argument for messages that contain only blocks (no text), satisfying Slack's accessibility requirements + +### 33. Pause/Resume/Stop Sync (Completed) +- **Sync lifecycle controls** — individual channel syncs can be paused, resumed, or stopped from the Home tab +- **`status` column** on `sync_channels` — supports `active` and `paused` states +- **Paused syncs** — messages, threads, edits, deletes, and reactions are not processed for paused channels; the handler checks `status` before dispatching +- **Stop with confirmation** — stopping a sync shows a confirmation modal before soft-deleting; the bot leaves the channel and notifies the partner workspace +- **Admin attribution** — pause/resume/stop actions are attributed to the admin who performed them in notification messages +- **Home tab indicators** — paused syncs show a `:double_vertical_bar: Paused` status on the Home tab with a Resume button + +### 34. User Profile Auto-Refresh (Completed) +- **`user_profile_changed` event** — subscribed in manifest and handled by `handle_user_profile_changed` +- **Directory update** — when a user changes their display name, real name, or email, the `user_directory` record is updated automatically +- **Mapping re-check** — after updating the directory, all user mappings involving the changed user are re-evaluated to detect new matches or update stale data + +### 35. 
Member Joined Channel Handler (Completed) +- **`member_joined_channel` event** — subscribed in manifest and handled by `handle_member_joined_channel` +- **Untracked channel detection** — when SyncBot is added to a channel that is not part of any active sync, it posts a friendly message and leaves automatically +- **Self-check** — the handler verifies the joined user is SyncBot itself (via `get_own_bot_user_id`) before acting + +### 36. Direct Pairing Requests (Completed) +- **Request-based pairing** — admins can send a direct pairing request to another workspace instead of manually sharing codes +- **DM notifications** — the partner workspace's admins receive a DM with Accept/Decline buttons and context about the requesting workspace +- **Home tab notification** — pending inbound pairing requests are shown on the partner's Home tab with Accept/Decline buttons +- **Bidirectional activation** — accepting a request activates the pairing on both sides, refreshes user directories, runs auto-matching, and updates both Home tabs +- **DM cleanup** — pairing request DMs are replaced with updated status messages when accepted, declined, or cancelled + +### 37. 
Home Tab UI Enhancements (Completed) +- **Synced-since with year** — channel sync dates always display the full year (e.g., "February 18, 2026") using Python `datetime` formatting instead of Slack's `{date_long}` token, which omits the current year +- **Message count** — each sync displays the number of tracked messages from `PostMeta` (e.g., "Synced since: February 18, 2026 · 42 messages tracked") +- **Remote channel deep links** — target channel names in the Home tab and subscription modals are rendered as deep links using `slack://channel?team=T...&id=C...` URLs +- **Consolidated published channels** — all synced channels across pairings are shown in a single sorted list on the Home tab +- **Partner Home tab refresh** — all mutations (publish, unpublish, subscribe, pause, resume, stop, pairing changes) automatically re-publish the partner workspace's Home tab + +### 38. User Mapping Screen Redesign (Completed) +- **Dedicated Home tab screen** — user mapping is now a full-screen Home tab view instead of a nested modal, providing more space and a better experience +- **Remote user avatars** — each mapped/unmapped user row displays the remote workspace user's profile photo as a right-aligned `ImageAccessoryElement` +- **Section headers with icons** — `:warning: *Unmapped Users*`, `:pencil2: *Soft / Manual Matches*`, `:lock: *Email Matches*` with `DividerBlock` separators +- **Edit modal avatars** — the user mapping edit modal also displays the remote user's avatar +- **Back navigation** — "Back to Home" button returns to the main Home tab view +- **Avatar caching** — `_avatar_lookup` fetches and caches profile photo URLs from the remote workspace + +### 39. 
Code Refactoring — Module Split & Package Structure (Completed) +- **Flattened `utils/` directory** — all modules moved to top-level packages under `syncbot/` (no more `utils/` nesting) +- **Split monolithic files** into focused packages: + - `helpers.py` → `helpers/` package (`core.py`, `slack_api.py`, `encryption.py`, `files.py`, `notifications.py`, `user_matching.py`, `workspace.py`, `oauth.py`, `_cache.py`) + - `handlers.py` → `handlers/` package (`messages.py`, `groups.py`, `group_manage.py`, `channel_sync.py`, `users.py`, `tokens.py`, `federation_cmds.py`, `sync.py`, `_common.py`) + - `builders.py` → `builders/` package (`home.py`, `channel_sync.py`, `user_mapping.py`, `sync.py`, `_common.py`) + - `federation.py` + `federation_api.py` → `federation/` package (`core.py`, `api.py`) +- **Renamed `logging_config.py` to `logger.py`** — shorter, clearer module name +- **Added `__init__.py` re-exports** — `helpers/__init__.py` and `handlers/__init__.py` re-export public APIs for clean imports +- **Updated `pyproject.toml`** — `ruff` `known-first-party` updated, `per-file-ignores` for `app.py` E402 + +### 40. Security Audit — Dependency Updates & Hardening (Completed) +- **Dependency updates** — updated `cryptography`, `urllib3`, `certifi`, `requests`, and `pillow` to latest versions +- **Path traversal prevention** — file name sanitization via `_safe_file_parts` strips non-alphanumeric characters from file IDs and extensions +- **PyMySQL SSL hardening** — explicit SSL context with `certifi` CA bundle, `check_hostname=True`, `PROTOCOL_TLS_CLIENT` +- **URL-escaped credentials** — database username and password are `urllib.parse.quote_plus`-escaped in the connection string +- **Silent exception logging** — replaced bare `except: pass` blocks with `contextlib.suppress` or proper logging + +### 41. 
Hardening & Performance Pass (Completed) +- **Critical bug fixes**: + - Fixed broken import: `_users_list_page` was imported from `helpers.slack_api` instead of `helpers.user_matching` where it's defined + - Fixed `str.format()` crash: messages containing literal curly braces (`{` or `}`) caused `KeyError`/`IndexError` in `apply_mentioned_users`; replaced with iterative `re.sub` using a lambda +- **Performance — Fernet caching**: Added `@functools.lru_cache(maxsize=2)` to `_get_fernet()` to cache the derived Fernet instance, eliminating 600,000 PBKDF2 iterations on every encrypt/decrypt call +- **Performance — `auth.test` consolidation**: Merged `get_own_bot_id` and `get_own_bot_user_id` into a single cached `_get_auth_info` call, halving Slack API round-trips for bot identity +- **Performance — `DbManager.count_records()`**: Added `SELECT COUNT(*)` method and replaced `len(find_records(...))` calls that were fetching all rows just to count them +- **Performance — module-level constants**: Moved `_PREFIXED_ACTIONS` tuple to module scope (avoids rebuilding on every request); cached `GetDBClass` column keys in a class-level `frozenset` +- **DoS — file download streaming**: All `requests.get` calls for files now use `stream=True` with 30s timeout, 8 KB chunks, and a 100 MB size cap +- **DoS — S3 client reuse**: `_get_s3_client()` creates the boto3 client once instead of per-file inside upload loops +- **DoS — input caps**: File attachments capped at 20 per event, mentions at 50 per message, federation user ingestion at 5,000 per request, federation images at 10 per message +- **DoS — federation body limit**: Local dev federation HTTP server enforces 1 MB max request body +- **DoS — connection pool safety**: `GLOBAL_ENGINE.dispose()` now only fires after all retries are exhausted, not on every transient failure (prevents disrupting other in-flight queries) +- **DoS — `decrypt_bot_token` reuse**: Eliminated duplicate `decrypt_bot_token` calls in the message edit handler 
+- **DRY — `_parse_private_metadata`**: Replaced 6 inline `import json; json.loads(private_metadata)` blocks across 4 handler files with a shared helper in `_common.py` +- **DRY — `_toggle_sync_status`**: Merged `handle_pause_sync` and `handle_resume_sync` (near-identical 60-line functions) into a single parameterized helper +- **DRY — `_activate_pairing_users`**: Extracted duplicated 30-line user directory refresh + seed + auto-match blocks from two pairing handlers +- **DRY — `_find_post_records`**: Extracted duplicated PostMeta query pattern (3 call sites) in `federation/api.py` +- **DRY — `_find_source_workspace_id`**: Extracted duplicated source-workspace lookup loop (5 call sites) in `messages.py` +- **DRY — user directory upsert**: Refactored `_refresh_user_directory` to call `_upsert_single_user_to_directory` instead of duplicating the upsert logic +- **DRY — `notify_admins_dm`**: Added optional `blocks` parameter for Block Kit support, consolidating the text-only and block DM paths +- **Lint clean**: All `ruff` checks pass with zero warnings + +### 42. 
Workspace Groups Refactor — Many-to-Many Collaboration (Completed) +- **Replaced 1-to-1 Workspace Pairings with many-to-many Workspace Groups** — workspaces can now create or join groups, and a single workspace can belong to multiple groups with different combinations of members +- **New database tables**: + - `workspace_groups` — group record with `name`, `invite_code`, `created_by_workspace_id`, `created_at` + - `workspace_group_members` — junction table with `group_id`, `workspace_id`, `joined_at`, `deleted_at` (soft-delete) +- **Removed `workspace_pairings` table** — all pairing logic replaced by group membership +- **Schema changes to `syncs`** — replaced `pairing_id` with `group_id` (FK to `workspace_groups`), added `sync_mode` (`direct` or `group`), `target_workspace_id` (for direct syncs), and `publisher_workspace_id` (controls unpublish rights) +- **Schema changes to `user_mappings`** — replaced `pairing_id` with `group_id` (FK to `workspace_groups`) +- **Two sync modes**: + - **Direct** — publish a channel 1-to-1 to a specific workspace in the group (behaves like legacy pairings) + - **Group-wide** — publish a channel for any group member to subscribe independently +- **Selective stop sync** — when a workspace stops syncing, only that workspace's `PostMeta` and `SyncChannel` records are deleted; other group members continue uninterrupted +- **Publisher-only unpublish** — only the workspace that originally published a channel can unpublish it; the `Sync` record persists until the publisher explicitly removes it +- **Invite code flow** — creating a group generates a `XXX-XXXX` invite code; any workspace can join by entering the code; any existing group member can accept join requests +- **User mapping scoped per group** — user matching operates per workspace pair within a group; remote users displayed as "Display Name (Workspace Name)" and sorted by normalized name +- **Home tab redesign** — groups displayed as sections with member lists, inline channel 
syncs, "Publish Channel" button per group (no separate group selection step), and "Leave Group" button +- **Federation integration** — federated connections now create `WorkspaceGroup` and `WorkspaceGroupMember` records (with `federated_workspace_id`) instead of `WorkspacePairing` records +- **Leave group with cleanup** — soft-deletes the membership, removes associated `PostMeta`/`SyncChannel` records, leaves channels, removes user mappings, notifies remaining members, and deletes the group if empty +- **New handler modules** — `handlers/groups.py` (create/join) and `handlers/group_manage.py` (leave) replace `handlers/pairing.py` and `handlers/pairing_manage.py` +- **Removed modules** — `handlers/pairing.py`, `handlers/pairing_manage.py`, `builders/pairing.py` +- **Updated tests** — renamed test classes and methods to group terminology; updated action ID constants + +### 43. Block Kit Shorthand & UI Polish (Completed) +- **Block Kit shorthand** — builders and handlers use `slack.blocks` helpers (`header`, `divider`, `context`, `section`, `button`, `actions`) instead of verbose `orm.*Block` constructors where applicable; `section` alias for section-style blocks in `slack/blocks.py` +- **Parameter shadowing** — in modules that take a `context` (request/Bolt) parameter, the blocks context helper is imported as `block_context` to avoid shadowing (e.g. `builders/home.py`, `builders/user_mapping.py`) +- **Synced Channels display** — Home tab Synced Channels rows no longer show the remote channel link; each row shows the local channel plus bracketed workspace list including the local workspace (e.g. 
_[Any: Sprocket Dev, Sprocket Dev Beta]_) +- **Deactivated/deleted users** — `UserDirectory` has `deleted_at`; deactivated users are soft-deleted and mappings purged; users no longer in `users.list` are hard-deleted; mapping UI, edit modal, and federation export filter out deleted users +- **Mapped display names** — synced messages in the target workspace use the mapped local user's name and icon when available; otherwise source name/icon with workspace indicator +- **Display name normalization** — `normalize_display_name()` used in user mapping UI and synced message display; user mapping screen shows "Display Name (Workspace)" with normalized names + +### 44. Home and User Mapping Refresh — Performance & Cost (Completed) +- **Content hash** — Home tab and User Mapping Refresh handlers compute a stable hash from minimal DB queries (groups, members, syncs, pending invites; for User Mapping, mapping ids/methods). When the hash matches the last full refresh, the app skips the expensive path (no N× `team_info`, no directory refresh, no full rebuild). +- **Cached built blocks** — After a full refresh, the built Block Kit payload is cached (in-process, keyed by team/user and optionally group for User Mapping). When the hash matches, the app re-publishes that cached view with one `views.publish` instead of re-running all DB and Slack calls. +- **60-second cooldown** — If the user clicks Refresh again within 60 seconds and the hash is unchanged, the app re-publishes the cached view with a context message: "No new data. Wait __ seconds before refreshing again." The displayed seconds are the current remaining time from the last refresh (recomputed on each click). Cooldown constant: `REFRESH_COOLDOWN_SECONDS` (default 60) in `constants.py`. 
+- **Request-scoped caching** — `get_workspace_by_id(workspace_id, context=None)` and `get_admin_ids(client, team_id=None, context=None)` use the request `context` dict when provided: one DB read per distinct workspace, one `users.list` per distinct team per request. Reduces duplicate lookups when building the Home tab or when multiple workspaces' Home tabs are refreshed in one invocation. +- **Context through push-refresh paths** — When a change in one workspace triggers Home tab refreshes in others (e.g. publish channel, join group, user mapping refresh), the handler's `context` is passed into `_refresh_group_member_homes` and `refresh_home_tab_for_workspace`, so all `build_home_tab` calls in that request share the same request-scoped cache. Call sites updated in `channel_sync.py`, `group_manage.py`, `users.py`, `groups.py`, and `sync.py`. +- **User Mapping Refresh** — Same pattern applied to the User Mapping screen: content hash, cached blocks, 60s cooldown with message, and `build_user_mapping_screen(..., context=..., return_blocks=True)` for caching. Request-scoped `get_workspace_by_id` used when building the screen. + +### 45. Backup, Restore, and Data Migration (Completed) +- **Slack UI** — Home tab has **Backup/Restore** (next to Refresh) and **Data Migration** (in External Connections when federation is enabled). Modals for download backup, restore from JSON, export workspace data, and import migration file; confirmation modals when HMAC or encryption-key/signature checks fail with option to proceed anyway. +- **Full-instance backup** — All tables exported as JSON with `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `PASSWORD_ENCRYPT_KEY`), and HMAC over canonical JSON. Restore inserts in FK order; intended for empty/fresh DB (e.g. after AWS rebuild). On HMAC or encryption-key mismatch, payload stored in cache and confirmation modal pushed; after restore, Home tab caches invalidated for all workspaces. 
+- **Workspace migration export/import** — Export produces workspace-scoped JSON (syncs, sync channels, post meta, user directory, user mappings) with optional `source_instance` (webhook_url, instance_id, public_key, one-time connection code). Ed25519 signature for tampering detection. Import verifies signature, resolves or creates federated group (using `source_instance` when present), replace mode (remove then create SyncChannels/PostMeta/user_directory/user_mappings), optional tampering confirmation; Home tab and sync-list caches invalidated after import. +- **Instance A detection** — Federated pair request accepts optional `team_id` and `workspace_name`; stored as `primary_team_id` and `primary_workspace_name` on `federated_workspaces`. If a local workspace with that `team_id` exists, it is soft-deleted so the federated connection is the only representation of that workspace on the instance. + +## Remaining Recommendations + +### Low Priority + +1. **Dependencies** + - Update SQLAlchemy to 2.0+ (currently pinned to <2.0) + - Review and update other dependencies + +2. **Database Migrations** + - Consider adopting Alembic for formal migration management + +3. 
**Advanced Testing** + - Add integration tests for database operations + - Add tests for Slack API interactions (using mocks for full handler flows) + - Add end-to-end sync workflow tests + +## Notes + +- The codebase is organized into focused packages (`handlers/`, `builders/`, `helpers/`, `federation/`, `db/`, `slack/`) with clear separation of concerns +- The routing system using mappers is clean and maintainable +- Database layer benefits from connection pooling, automatic retry with safe disposal, and `SELECT COUNT(*)` for counting +- All Slack API calls have rate-limit handling with exponential backoff +- Error isolation in sync loops ensures partial failures don't cascade +- 60 unit tests cover core helper functions, encryption, caching, event parsing, bot filtering, invite codes, and sync creation +- Structured JSON logging with correlation IDs enables fast CloudWatch Logs Insights queries +- Pre-commit hooks enforce consistent code style on every commit +- Admin/owner authorization enforced on all configuration actions with defense-in-depth +- Cross-workspace user matching resolves @mentions persistently with email, name, and manual matching (scoped per group) +- Bot messages from third-party bots are synced with proper attribution; only SyncBot's own messages are filtered +- Workspace Groups support many-to-many collaboration with invite codes, ensuring syncs are only established between explicitly trusted workspaces +- Channel sync supports both direct (1-to-1) and group-wide publish modes +- User matching auto-runs on group join; unmatchable users are filtered; matched users have inline override dropdowns +- Dev Container and Docker Compose configs provide zero-install local development with live editing +- Reactions, images, videos, and GIFs are all synced bidirectionally with proper user attribution +- Individual syncs can be paused, resumed, and stopped with selective history cleanup and publisher-only unpublish +- User profile changes (display name, 
email) are detected automatically and trigger mapping re-evaluation +- SyncBot self-removes from unconfigured channels with a friendly message +- All foreign key relationships use `ON DELETE CASCADE` for clean data removal +- File downloads are streamed with timeouts and size caps to prevent DoS +- Fernet key derivation is cached for performance; bot identity is resolved in a single API call +- Duplicated code has been consolidated into shared helpers throughout handlers and federation modules +- Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching and context passing through push-refresh paths keep multi-workspace updates lightweight \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..be3f7b2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year>  <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/README.md b/README.md index 14936bf..e411fed 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,578 @@ # SyncBot +![SyncBot Icon](assets/icon.png) -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +SyncBot is a Slack app originally developed for the [F3 Community](https://github.com/F3Nation-Community/syncbot) and has been forked here for general use by other Slack Workspace admins. It is intended to provide a replication ("Sync") service for messages and replies across Slack Workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs are automatically mirrored to every channel in a Sync group. + +## End-User Quick Start + +1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) +2. Open the **SyncBot** app from the sidebar and click the **Home** tab (requires workspace admin or owner) +3. 
The Home tab shows everything in one view: + - **SyncBot Configuration (top row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) + - **Workspace Groups** — create or join groups of workspaces that can sync channels together + - **Per-group sections** — for each group you can publish channels, manage user mapping (dedicated Home tab screen), and see/manage channel syncs inline + - **Synced Channels** — each row shows the local channel and workspace list in brackets (e.g. _[Any: Your Workspace, Partner Workspace]_), with pause/resume and stop controls, synced-since date, and tracked message count + - **External Connections** *(when federation is enabled)* — Generate/Enter Connection Code and **Data Migration** (export workspace data for migration to another instance, or import a migration file) -Set up is simple: +Things to know: -1. Click [this link](https://utazcizeo0.execute-api.us-east-2.amazonaws.com/Prod/slack/install) from a desktop computer. Make sure you have selected your desired workspace in the upper right! -2. Next, you can configure SyncBot by using the `/config-syncbot` slash command -3. If this is the first workspace you are configuring, use the "Create new Sync" button. Otherwise, use "Join existing Sync". +- Only workspace **admins and owners** can configure syncs (set `REQUIRE_ADMIN=false` to allow all users) +- Messages, threads, edits, deletes, reactions, images, videos, and GIFs are all synced +- Messages from other bots are synced; only SyncBot's own messages are filtered to prevent loops +- Existing messages are not back-filled; syncing starts from the moment a channel is linked +- Do not add SyncBot manually to channels. SyncBot adds itself when you configure a Sync. 
If it detects it was added to an unconfigured channel it will post a message and leave automatically +- Both public and private channels are supported +- **Workspace Groups**: Workspaces must belong to the same **group** before they can sync channels or map users. Admins can create a new group (which generates an invite code) or join an existing group by entering a code. A workspace can be in multiple groups with different combinations of other workspaces +- **Sync Modes**: When publishing a channel inside a group, admins choose either **1-to-1** (only a specific workspace can subscribe) or **group-wide** (any group member can subscribe independently) +- **Pause/Resume**: Individual channel syncs can be paused and resumed without losing configuration. Paused channels do not sync any messages, threads, or reactions +- **Selective Stop**: When a workspace stops syncing a channel, only that workspace's history is removed. Other workspaces continue syncing uninterrupted. The published channel remains available until the original publisher unpublishes it +- **Uninstall/Reinstall**: If a workspace uninstalls SyncBot, group memberships and syncs are paused (not deleted). Reinstalling within the retention period (default 30 days, configurable via `SOFT_DELETE_RETENTION_DAYS`) automatically restores everything. Group members are notified via DMs and channel messages +- **User Mapping**: Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name +- **Refresh buttons**: The Home tab and User Mapping screens have Refresh buttons. To keep RDS and Slack API usage low, repeated clicks with no data changes are handled lightly: a 60-second cooldown applies, and when nothing has changed the app reuses cached content and shows "No new data. Wait __ seconds before refreshing again." 
when you click again too soon +- **Media Sync**: Images and videos are uploaded directly to target channels (or via S3 if configured). GIFs from the Slack GIF picker or GIPHY are synced as image blocks +- **External Connections** *(opt-in)*: Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. **Data Migration** in the same section lets you export your workspace data (syncs, channels, post meta, user directory, user mappings) for moving to another instance, or import a migration file after connecting. Disabled by default — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable +- **Backup/Restore**: Use **Backup/Restore** on the Home tab to download a full-instance backup (all tables as JSON) or restore from a backup file. Intended for disaster recovery (e.g. before rebuilding AWS). Backup includes an integrity check (HMAC); restore checks the encryption key hash — if it differs, bot tokens will not decrypt until workspaces re-authorize. Restore targets an empty or fresh database +- **Data Migration**: When federation is enabled, **Data Migration** opens a modal to export your workspace data (for moving that workspace to its own instance) or import a migration file. The export can include a one-time connection code so the new instance can connect to the old one in one step. Import uses replace mode (existing sync channels in the federated group are replaced). User mappings are carried over (same Slack workspace, so user IDs match). 
Exports are signed (Ed25519) for tampering detection; import still proceeds on mismatch but shows a warning -Some notes: - - Bot messages will not be synced, only actual user messages - - Existing messages are not synced, but going forward all posts and their thread replies will be - - Do not add SyncBot manually to channels - SyncBot will add itself to channels you configure. If it detects that it has been added to a non-configured channel, it will leave the channel - - Private channels are not supported +--- -## Feature Request and Roadmap +## Deploying to AWS -I use GitHub Issues for tracking feature requests. Feel free to add some here: https://github.com/F3Nation-Community/syncbot/issues +SyncBot ships with a full AWS SAM template (`template.yaml`) that provisions everything on the **free tier**: -Roadmap: - - Picture sync - - Reaction sync \ No newline at end of file +| Resource | Service | Free-Tier Detail | +|----------|---------|-----------------| +| Compute | Lambda (128 MB) | 1M requests/month free | +| API | API Gateway v1 | 1M calls/month free | +| Database | RDS MySQL (db.t3.micro) | 750 hrs/month free (12 months) | +| Storage | S3 (3 buckets) | 5 GB free | + +--- + +## Architecture + +See [ARCHITECTURE.md](ARCHITECTURE.md) for message sync flow, AWS infrastructure, backup/restore and data migration flows, and performance/cost optimizations (including Refresh button behavior and request-scoped caching). + +--- + +## Backup, Restore, and Data Migration + +### Full-instance backup and restore + +Use **Backup/Restore** (Home tab, next to Refresh) to: + +- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. 
**Use the same `PASSWORD_ENCRYPT_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. +- **Restore from backup** — Paste the backup JSON in the modal and submit. Restore is intended for an **empty or fresh database** (e.g. after an AWS rebuild). If the encryption key hash or HMAC does not match, you will see a warning and can still proceed (e.g. if you edited the file on purpose). + +After restore, Home tab caches are cleared so the next Refresh shows current data. + +### Workspace data migration (federation) + +When **External Connections** is enabled, **Data Migration** (in that section) lets you: + +- **Export** — Download a workspace-scoped JSON file (syncs, sync channels, post meta, user directory, user mappings) plus an optional one-time connection code so the new instance can connect to the source in one step. The file is signed (Ed25519) for tampering detection. +- **Import** — Paste a migration file, then submit. If the file includes a connection payload and you are not yet connected, the app establishes the federation connection and creates the group, then imports. Existing sync channels for that workspace in the federated group are **replaced** (replace mode). User mappings are imported where both workspaces exist on the new instance. If the signature check fails, a warning is shown but you can still proceed. + +After import, Home tab and sync-list caches for that workspace are cleared. + +**Instance A behavior:** When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](ARCHITECTURE.md) for details. 
+ +--- + +### Prerequisites + +| Tool | Version | Purpose | +|------|---------|---------| +| **AWS SAM CLI** | latest | Build & deploy Lambda + infra | +| **Docker** | latest | SAM uses a container to build the Lambda package | +| **MySQL client** *(optional)* | any | Run schema scripts against the DB | + +### Create a Slack app + +Before deploying (or developing locally) you need a Slack app: + +1. Go to [api.slack.com/apps](https://api.slack.com/apps) and click **Create New App** → **From an app manifest** +2. Select your workspace, then paste the contents of [`slack-manifest.yaml`](slack-manifest.yaml) +3. After creating the app, upload the icon from [`assets/icon.png`](assets/icon.png) on the **Basic Information** page under **Display Information** +4. Note these values — you'll need them for deploy and/or local development: + +| Where to find it | Value | Used for | +|-------------------|-------|----------| +| Basic Information → **App Credentials** | Signing Secret | Production deploy | +| Basic Information → **App Credentials** | Client ID, Client Secret | Production deploy (OAuth) | +| **OAuth & Permissions** → **Install to Workspace** → Install, then copy | Bot User OAuth Token (`xoxb-...`) | **Local development** | + +5. After your first deploy, come back and replace the placeholder URLs in the app settings with your actual API Gateway endpoint (shown in the CloudFormation stack outputs) + +> **Why do I need to install the app manually for local dev?** In production, SyncBot uses OAuth so each workspace gets its own token automatically. In local development mode, there's no OAuth flow — you connect to a single workspace using a bot token you copy from the Slack app settings. + +### First-time deploy + +1. **Build** the Lambda package: + +```bash +sam build --use-container +``` + +2. 
**Deploy** with guided prompts: + +```bash +sam deploy --guided +``` + +You'll be prompted for parameters like `DatabaseUser`, `DatabasePassword`, `SlackSigningSecret`, `SlackClientId`, `SlackClientSecret`, `EncryptionKey`, and `AllowedDBCidr`. These are stored as CloudFormation parameters (secrets use `NoEcho`). + +3. **Initialize the database** — after the stack creates the RDS instance, grab the endpoint from the CloudFormation outputs and run: + +```bash +mysql -h <rds-endpoint> -u <username> -p syncbot < db/init.sql +``` + +4. **Update your Slack app URLs** to point at the API Gateway endpoint shown in the stack outputs (e.g., `https://xxxxx.execute-api.us-east-2.amazonaws.com/Prod/slack/events`). + +### Sharing infrastructure across apps + +If you run multiple apps in the same AWS account, you can point SyncBot at existing resources instead of creating new ones. Every `Existing*` parameter defaults to empty (create new); set it to an existing resource name to reuse it. + +| Parameter | What it skips | +|-----------|---------------| +| `ExistingDatabaseHost` | VPC, subnets, security groups, RDS instance | +| `ExistingSlackStateBucket` | Slack OAuth state S3 bucket | +| `ExistingInstallationBucket` | Slack installation data S3 bucket | +| `ExistingImagesBucket` | Synced-images S3 bucket | + +Example — deploy with an existing RDS and images bucket: + +```bash +sam deploy --guided \ + --parameter-overrides \ + ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com \ + ExistingImagesBucket=my-shared-images-bucket +``` + +Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). Create the schema and initialize the tables on the existing instance: + +```bash +mysql -h <rds-endpoint> -u <username> -p -e "CREATE DATABASE IF NOT EXISTS syncbot;" +mysql -h <rds-endpoint> -u <username> -p syncbot < db/init.sql +``` + +**What about API Gateway and Lambda?** Each stack always creates its own API Gateway and Lambda function. 
These are lightweight resources that don't affect free-tier billing — the free tier quotas (1M API calls, 1M Lambda requests) are shared across your entire account regardless of how many gateways or functions you have. If you want a unified domain across apps, put a CloudFront distribution or API Gateway custom domain in front. + +### Subsequent deploys + +```bash +sam build --use-container +sam deploy # staging (default profile) +sam deploy --config-env prod # production profile +``` + +The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. + +### CI/CD via GitHub Actions + +Pushes to `main` automatically build and deploy via `.github/workflows/sam-pipeline.yml`: + +1. **Build** — `sam build --use-container` +2. **Deploy to test** — automatic +3. **Deploy to prod** — requires manual approval (configure in GitHub environment settings) + +#### One-time setup + +1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3, IAM, and RDS. Generate an access key pair. + +2. **Create a SAM deployment bucket** — SAM needs an S3 bucket to upload build artifacts during deploy: + +```bash +aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 +``` + +3. **Create GitHub Environments** — Go to your repo → **Settings** → **Environments** and create two environments: `test` and `prod`. For `prod`, enable **Required reviewers** so production deploys need manual approval. + +4. 
**Add GitHub Secrets** — Under **Settings** → **Secrets and variables** → **Actions**, add these as **environment secrets** for both `test` and `prod`: + +| Secret | Where to find it | +|--------|-----------------| +| `AWS_ACCESS_KEY_ID` | IAM user access key (step 1) | +| `AWS_SECRET_ACCESS_KEY` | IAM user secret key (step 1) | +| `SLACK_SIGNING_SECRET` | Slack app → Basic Information → App Credentials | +| `SLACK_CLIENT_SECRET` | Slack app → Basic Information → App Credentials | +| `DATABASE_PASSWORD` | The RDS master password you chose | +| `PASSWORD_ENCRYPT_KEY` | Any passphrase for bot-token encryption at rest | + +5. **Add GitHub Variables** — Under the same settings page, add these as **environment variables** for each environment: + +| Variable | `test` value | `prod` value | +|----------|-------------|-------------| +| `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | +| `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | +| `STAGE_NAME` | `staging` | `prod` | + +#### Deploy flow + +Once configured, merge or push to `main` and the pipeline runs: + +``` +push to main → sam build → deploy to test → (manual approval) → deploy to prod +``` + +Monitor progress in your repo's **Actions** tab. The first deploy creates the full CloudFormation stack (VPC, RDS, Lambda, API Gateway, S3 buckets). Subsequent deploys update only what changed. + +> **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. + +--- + +## Local Development + +### Option A: Dev Container (recommended) + +Opens the project inside a Docker container with full editor integration — IntelliSense, debugging, terminal, and linting all run in the container. No local Python or MySQL install needed. 
+ +**Prerequisites:** Docker Desktop + the [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension + +#### 1. Clone the repo and create a `.env` file + +```bash +git clone https://github.com/GITHUB_ORG_NAME/syncbot.git +cd syncbot +``` + +Copy the example env file and fill in your bot token (from the [Create a Slack app](#create-a-slack-app) step above): + +```bash +cp .env.example .env +``` + +At minimum, set `SLACK_BOT_TOKEN` to the `xoxb-...` token you copied from **OAuth & Permissions** after installing the app to your workspace. + +#### 2. Open in Dev Container + +Open the project folder in your VSCodium-based editor, then: + +- Press `Cmd+Shift+P` → **Dev Containers: Reopen in Container** +- Or click the green remote indicator in the bottom-left corner → **Reopen in Container** + +The first build takes a minute or two. After that, your editor is running inside the container with Python, MySQL, and all dependencies ready. + +#### 3. Run the app + +Open the integrated terminal (it's already inside the container) and run: + +```bash +cd syncbot && python app.py +``` + +The app starts on **port 3000** (auto-forwarded to your host). + +#### 4. Expose to Slack + +In a **local** terminal (outside the container), start a tunnel using your favorite platform, for instance [Cloudflare Tunnel](https://developers.cloudflare.com/cloudflare-one/networks/connectors/cloudflare-tunnel/) or [ngrok](https://ngrok.com/docs/what-is-ngrok): + +```bash +cloudflared tunnel --url http://localhost:3000/ +``` +or +```bash +ngrok http 3000 +``` + +Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to point at the public URL. + +#### 5. Run tests + +```bash +python -m pytest tests -v +``` + +#### 6. Connect to the database + +```bash +mysql -h db -u root -prootpass syncbot +``` + +The database schema is initialized automatically on first run. 
To reset it, rebuild the container with **Dev Containers: Rebuild Container**. + +--- + +### Option B: Docker Compose (without Dev Container) + +Runs everything in containers but you edit files on your host. Good if you don't want to use the Dev Container extension. + +**Prerequisites:** Docker Desktop + +#### 1. Clone and configure + +```bash +git clone https://github.com/GITHUB_ORG_NAME/syncbot.git +cd syncbot +``` + +Create a `.env` file (same as Option A above — `cp .env.example .env` and set your `SLACK_BOT_TOKEN`). + +#### 2. Start the app + +```bash +docker compose up --build +``` + +This starts both MySQL and the app. The database schema is initialized automatically on first run. The app listens on **port 3000**. + +To run in the background: + +```bash +docker compose up --build -d +docker compose logs -f app # follow app logs +``` + +Code changes require a restart (no rebuild — the code is mounted as a volume): + +```bash +docker compose restart app +``` + +Only rebuild when `requirements.txt` changes: + +```bash +docker compose up --build +``` + +#### 3. Run tests and other commands + +```bash +docker compose exec app python -m pytest /app/tests -v +docker compose exec db mysql -u root -prootpass syncbot +``` + +#### Resetting + +```bash +docker compose down # stop everything +docker compose down -v # stop and delete the database volume +``` + +--- + +### Option C: Native Python + +Run the app directly on your machine with a local or containerized MySQL instance. + +**Prerequisites:** + +| Tool | Version | Purpose | +|------|---------|---------| +| **Python** | 3.11+ | Runtime | +| **Poetry** | 1.6+ | Dependency management | +| **Docker** *(optional)* | latest | Easiest way to run MySQL locally | + +#### 1. Clone and install dependencies + +```bash +git clone https://github.com/GITHUB_ORG_NAME/syncbot.git +cd syncbot +poetry install --with dev +``` + +#### 2. 
Set up a local MySQL database
+
+Run a MySQL 8 instance (Docker is easiest):
+
+```bash
+docker run -d --name syncbot-db \
+  -e MYSQL_ROOT_PASSWORD=rootpass \
+  -e MYSQL_DATABASE=syncbot \
+  -p 3306:3306 \
+  mysql:8
+```
+
+Initialize the schema:
+
+```bash
+mysql -h 127.0.0.1 -u root -prootpass syncbot < db/init.sql
+```
+
+#### 3. Configure environment variables
+
+Copy the example env file and fill in your bot token (from the [Create a Slack app](#create-a-slack-app) step):
+
+```bash
+cp .env.example .env
+source .env
+```
+
+> **Note:** `source .env` exports the variables to child processes only if the file uses `export KEY=value` lines. If your `.env` uses plain `KEY=value` lines, run `set -a; source .env; set +a` instead so the app process can see them.
+
+At minimum, set `SLACK_BOT_TOKEN` to the `xoxb-...` token from **OAuth & Permissions**. For native Python, also verify the database values match your local MySQL (`DATABASE_HOST=127.0.0.1` by default). See `.env.example` for all available options.
+
+#### 4. Run the app
+
+```bash
+poetry run python syncbot/app.py
+```
+
+The app starts a local Bolt server on **port 3000**. Use your favorite tunnel platform to expose it to Slack:
+
+```bash
+cloudflared tunnel --url http://localhost:3000/
+```
+or
+```bash
+ngrok http 3000
+```
+
+Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to the public URL.
+
+#### 5. Run tests
+
+```bash
+poetry run pytest -v
+```
+
+All tests run against mocked dependencies — no database or Slack credentials needed.
+ +--- + +## Environment Variables Reference + +### Always required + +| Variable | Description | +|----------|-------------| +| `DATABASE_HOST` | MySQL hostname | +| `ADMIN_DATABASE_USER` | MySQL username | +| `ADMIN_DATABASE_PASSWORD` | MySQL password | +| `ADMIN_DATABASE_SCHEMA` | MySQL database name | + +### Required in production (Lambda) + +| Variable | Description | +|----------|-------------| +| `SLACK_SIGNING_SECRET` | Verifies incoming Slack requests | +| `ENV_SLACK_CLIENT_ID` | OAuth client ID | +| `ENV_SLACK_CLIENT_SECRET` | OAuth client secret | +| `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes | +| `ENV_SLACK_STATE_S3_BUCKET_NAME` | S3 bucket for OAuth state | +| `ENV_SLACK_INSTALLATION_S3_BUCKET_NAME` | S3 bucket for installations | +| `PASSWORD_ENCRYPT_KEY` | Passphrase for Fernet bot-token encryption | + +### Local development only + +| Variable | Description | +|----------|-------------| +| `SLACK_BOT_TOKEN` | Bot token (presence triggers local-dev mode) | +| `AWS_ACCESS_KEY_ID` | For S3 uploads during local dev | +| `AWS_SECRET_ACCESS_KEY` | For S3 uploads during local dev | + +### Optional + +| Variable | Default | Description | +|----------|---------|-------------| +| `LOCAL_DEVELOPMENT` | `false` | Set to `true` to skip Slack token verification at startup and use human-readable log output instead of JSON. | +| `REQUIRE_ADMIN` | `true` | When `true`, only workspace admins/owners can configure syncs. Set to `false` to allow all users. | +| `S3_IMAGE_BUCKET` | *(empty)* | S3 bucket name for synced images. When empty, images are uploaded directly to Slack via `files_upload_v2`. | +| `S3_IMAGE_URL` | *(auto from bucket)* | Public URL prefix for S3 images (e.g., `https://mybucket.s3.amazonaws.com/`). Auto-generated from `S3_IMAGE_BUCKET` if not set. | +| `S3_VIDEO_ENABLED` | `false` | When `true` and `S3_IMAGE_BUCKET` is set, videos are also stored in S3. When `false`, videos are uploaded directly to Slack regardless of S3 configuration. 
| +| `SOFT_DELETE_RETENTION_DAYS` | `30` | Days to keep soft-deleted workspace data before permanent purge. When a workspace uninstalls, its group memberships and syncs are paused; reinstalling within this window restores everything. | +| `SYNCBOT_FEDERATION_ENABLED` | `false` | Set to `true` to enable the External Connections feature (cross-instance sync with other SyncBot deployments). | +| `SYNCBOT_INSTANCE_ID` | *(auto-generated)* | Unique UUID for this SyncBot instance. Auto-generated on first run if not set. Used by external connections. | +| `SYNCBOT_PUBLIC_URL` | *(none)* | Publicly reachable base URL of this instance (e.g., `https://syncbot.example.com`). Required when external connections are enabled. | + +--- + +## API Endpoints and Slack Commands + +### HTTP Endpoints (API Gateway) + +All endpoints are served by a single Lambda function. Slack sends requests to the `/slack/*` URLs after you configure the app. The `/api/federation/*` endpoints handle cross-instance communication for external connections. 
+ +| Method | Path | Purpose | +|--------|------|---------| +| `POST` | `/slack/events` | Receives all Slack events (messages, actions, view submissions) and slash commands | +| `GET` | `/slack/install` | OAuth install page — redirects the user to Slack's authorization screen | +| `GET` | `/slack/oauth_redirect` | OAuth callback — Slack redirects here after the user approves the app | +| `POST` | `/api/federation/pair` | Accept an incoming external connection request | +| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance | +| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance | +| `POST` | `/api/federation/message/delete` | Receive a message deletion from a connected instance | +| `POST` | `/api/federation/message/react` | Receive a reaction from a connected instance | +| `POST` | `/api/federation/users` | Exchange user directory with a connected instance | +| `GET` | `/api/federation/ping` | Health check for connected instances | + +### Subscribed Slack Events + +| Event | Handler | Description | +|-------|---------|-------------| +| `app_home_opened` | `handle_app_home_opened` | Publishes the Home tab with workspace groups, channel syncs, and user matching. | +| `member_joined_channel` | `handle_member_joined_channel` | Detects when SyncBot is added to an unconfigured channel; posts a message and leaves. | +| `message.channels` / `message.groups` | `respond_to_message_event` | Fires on new messages, edits, deletes, and file shares in public/private channels. Dispatches to sub-handlers for new posts, thread replies, edits, deletes, and reactions. | +| `reaction_added` / `reaction_removed` | `_handle_reaction` | Syncs emoji reactions to the corresponding message in all target channels. | +| `team_join` | `handle_team_join` | Fires when a new user joins a connected workspace. Adds the user to the directory and re-checks unmatched user mappings. 
| +| `tokens_revoked` | `handle_tokens_revoked` | Handles workspace uninstall — soft-deletes workspace data and notifies group members. | +| `user_profile_changed` | `handle_user_profile_changed` | Detects display name or email changes and updates the user directory and mappings. | + +--- + +## Project Structure + +``` +syncbot/ +├── syncbot/ # Application code (Lambda function) +│ ├── app.py # Entry point — Slack Bolt app + Lambda handler +│ ├── constants.py # Env-var names, startup validation +│ ├── routing.py # Event/action → handler dispatcher +│ ├── logger.py # Structured JSON logging, correlation IDs, metrics +│ ├── requirements.txt # Pinned runtime dependencies (used by SAM build) +│ ├── builders/ # Slack UI construction (Home tab, modals, forms) +│ │ ├── home.py # App Home tab builder +│ │ ├── channel_sync.py # Publish/subscribe channel sync UI +│ │ ├── user_mapping.py # User mapping Home tab screen & edit modal +│ │ └── sync.py # Sync detail views +│ ├── handlers/ # Slack event & action handlers +│ │ ├── messages.py # Message sync — posts, threads, edits, deletes, reactions +│ │ ├── groups.py # Group lifecycle — create, join, accept, cancel +│ │ ├── group_manage.py # Leave group with confirmation +│ │ ├── channel_sync.py # Publish, unpublish, subscribe, pause, resume, stop +│ │ ├── users.py # team_join, profile changes, user mapping edits +│ │ ├── tokens.py # Uninstall / tokens_revoked handler +│ │ ├── federation_cmds.py # Federation UI actions (generate/enter/remove codes) +│ │ ├── sync.py # Sync join/remove handlers +│ │ └── _common.py # Shared handler utilities (EventContext, sanitize, metadata) +│ ├── helpers/ # Business logic, Slack API wrappers, utilities +│ │ ├── core.py # safe_get, request classification, admin checks +│ │ ├── slack_api.py # Slack API helpers (retry, bot identity, user info) +│ │ ├── encryption.py # Fernet bot-token encryption (cached PBKDF2) +│ │ ├── files.py # File download/upload (streaming, S3, size caps) +│ │ ├── 
notifications.py # Admin DMs, channel notifications +│ │ ├── user_matching.py # Cross-workspace user matching & mention resolution +│ │ ├── workspace.py # Workspace record helpers, group lookups +│ │ ├── oauth.py # OAuth install/redirect helpers +│ │ └── _cache.py # Simple in-process TTL cache +│ ├── federation/ # Cross-instance sync (opt-in) +│ │ ├── core.py # HMAC signing, HTTP client, payload builders +│ │ └── api.py # Federation API endpoint handlers +│ ├── db/ +│ │ ├── __init__.py # Engine, session, DbManager (pooling + retry) +│ │ └── schemas.py # SQLAlchemy ORM models +│ └── slack/ +│ ├── actions.py # Action/callback ID constants +│ ├── forms.py # Form definitions +│ ├── blocks.py # Block Kit shorthand helpers +│ └── orm.py # Block Kit ORM (BlockView, SectionBlock, etc.) +├── db/ +│ └── init.sql # Complete database schema (pre-release: single source) +├── tests/ # pytest unit tests (60 tests) +├── .devcontainer/ # Dev Container config (Cursor/VS Code) +├── Dockerfile # App container for local development +├── docker-compose.yml # Full local stack (app + MySQL) +├── template.yaml # AWS SAM infrastructure-as-code +├── samconfig.toml # SAM CLI deploy profiles (staging / prod) +├── slack-manifest.yaml # Slack app manifest (paste into api.slack.com) +├── pyproject.toml # Poetry project config + ruff linter settings +└── .github/workflows/ + └── sam-pipeline.yml # CI/CD: build → deploy staging → deploy prod +``` + +## Improvements and Roadmap + +See [IMPROVEMENTS.md](IMPROVEMENTS.md) for a detailed list of completed and planned improvements. + +## License + +This project is licensed under **AGPL-3.0**, which means you can use and modify it, just keep it open and shareable. See [LICENSE](LICENSE) for details. 
diff --git a/assets/icon.png b/assets/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..b98dc3eeb9f73c12c9e64cbde7ded0bbe8e2f6ec GIT binary patch literal 101485 zcmV)4K+3;~P)0T_ny-$|5;R-%}}p~FF1r$*8WK^MUksX~wvBUp7wPID1*<0M6jtKeEt z5M8_t`WNUbI17TJ2;wY`iY{7?h72h}2jA)8h4;tU-qrl2NA&@oQnGKkl-vz*N6QP-$6ENRo4if zVLtx*Ai+CiYh(U`Y;(-Nk&P?mB~k|vHn(wY8%M3ULn@EiPJ2B91emUOFJ8bA%or$J zRunqtj3osNPAVwKS#-<-JM8nAab8a-($lvgc1=^u(7e#Pfx1vbtgfzYW90u|uY<<% zMG#CE+5I_5zRxjro2|_x`SmDCwjPuI#LMnv-+Rc3AEY-Y-RuK0H+lYQ($((r;ueQL z_N#Ffq>*Yg`1X|js~md6-Ya8gNW+s4RBlju&As~^ZIHS82Y=FM@q_zR0000032;bR za{vGf6951U69E94oEQKBimpjSK~#9!>^=RDUDr`F=iYt0vRx;s<4xMeDgh}$`5+`| zQ9k?;Ar%p%N+9tAqA92nQbJLw_y;N>RHy_f5(x1NP*p-gLZ!bz)h70COn;!lbtNR! zri3I;Y{%XYufJ;Seed2g!|r|f`fBIx%=vhijRQx|-uuowXU?2C=ggUzGiNT;b?v>U zfaJGNMAQONev$>Yd~J{!=hw}LyqfAJby}&tuWo*P+wM5Ou18J2Y*!*swdLEwN(bBW z&8MD-%KaLnG%^ zL+!Q+F}~{L30Ys=ao>_DA&kdYW=iBIt`P?-F@C;9Vs3LA3dByLK8bCrj#eA(s1Zm$}`Vr-;-A(sXGHSOHuh(0SlgFNwg=*WX!Nd;VK*|M`!WmN)tEa1&t~Qgo8-N;1}+vF@g-U5BE-%Dy58rM?Nr zD~wepWnn6-Qh_Y%5?7u^=|vQ*n#ZrRwAj7RPbt& z1?qO%#ES9|$WX!yFta(O%z=~|KsY9{d{=6dbW(EL6 zgeGlyX6ELrk^}@R>FqKDq6V|Yswrb7yB87%yVo)OwIN8FwEv54X7yCIB{3110;a&W zEh9@xs*1V-d6Cm*N^A*h1_$6q1uk5jx%~apUwHguAD^iKW`e1aPf`bf0Gu#G5K*7! 
z%92!8Zsv?-W|38l|HyXbYzbfcZDH(GViX-0%_$V<(kFZ+CNFuJDM6WFGs`5d_Y8r^ zRt02^s3l4Ut1uDex%~u*mXx1l$CFZiA5*Mww_B>Her@&Ykyp7M8;M7rIB-y6bOi^&u4A<_>cUp4dCg8r6b2yo?Tr1wPRfoOya-^_j&BPaj`Cys&n7arx7)ocX(VRU$SB@fkrk;&h{!|f=cV11*XU(Em)+uH_!Vp&5z(vqFt4F$>$s{rkwuALSxa#v?S1o8))Td-E@pmezi1!YH{WA|7-vYOV7Tx zd~osN;n!9UE?jv2+vi_?ed$VNYyh&wo!#S2=jlvF&_Dh*8Vdb0dlv}DbX8B_IRaX= zqM&RNX;#5p4S<>T#^MK6!tCEDVHeJaV^e8I5D53On4(fV20In4%k=4hmU{UATxrigKq z6l!O3fV8#Qw`Sv@ldwrqXUfhs5R}7ukz;xVBsnVVe6rn~`cK@XOb7{MHEy^^D(6V^|TX~>z#+j%!=ChL6)%kC-untqvzK)G=L2g(1r$Za6|lG z*wg?HEo?lmK70J!p@pSG$5uXj?9|Ci*GGQ6BJat+-|5`Hb=Zd4|0L0FP2O%vD%O#s zW_wXieNY_arT!(LotH$WfP2 zOp6#dDa2Z*I6`on1Ss)qE=?rlv@;Kd6!M0x;-3)JQ$-Sy02Mfy0?{~0oGbE2&t2Sb z1AS&;<(Y-$4GrMnvE?nFXBU??9E1)pu72v+*}r*X`7(_vKO*mmeA5l~+s}ayu;SIf z3!hzV;+QI0vc5os_xY4n_Ndb`QCR#cFjxAe_d2a#WzH=!nXg}4RjLSEq*g^QoG)c4 z1~{`y;H_yLh*?o)c5mT|g9nS$cCyDp{A%<|&X$O2BcIu5(E)m70vq|9C(WL7sTod?Pi9rnQ+V51G>F(8~4U z5agwIg&O9VYWb-Nz4yv>{P6qJM5J8Q4gr$Zv?ymlX zg)&haURz5iQ9c)|nU0^o{N*36kBR{TBLE6`=$HXF6A*h>Q1!_@>n}ZV-+^*whCIz6 za)AhPDJr9u&+hek+xt#n*R`uiS|!5|6eGmf9ydRmt@lrAiI{?9EViX+BP*6gzMLED z0;CL*Vtd_jcL;?o3Y>-9q2Qb+)9J1_Io&k-n900M5Degekr=>X0R(X3`ks~Vp8dTi z=O5m+8^Hq*Fbc$S?Y6qcmc}v!A54MJ^KOwpzIN0vBGT7IW>0s(wXbJOh9r^D0CL9O z7Ikfcw$IYAAR?pnGS^MWGG7+xAm97GLPSMT=xjyeET0APTE!fFQB+CXPTHKBXmRQC zmw#}>y8!}Etc5@mJ8}W zm04@YO<{cm6O^o@TP8%snd0I_B7%SvVgIsmwKstFtI4xu=UX^AHe7v824bR=QBaaG zHA8xoz=iE(_t##WEf9ffo3G<(>l2=N@%*Ln2CyAGhm$d{5%<-@&pf#2mk#XN%>@G? 
zmJTY0QXpUuC)|A0KX;Zyvsp>68^A;>v7}WjdU+8*ZRx&;k5@Fa%*R1W<-M!D`Q~{0r~Gwp?@V@IBpiEZ&8Lk0D*+#U@8VZpAJ&ZqfQT|kH3D@r z+XoK-gnR1sUp_GVs}J6J7ZAG=!69Hcp4R%bkHNzkV&B{$e=1HCWfFoj=eT}!8|^w_ zOTC<#^Os)K0e%mRD1F>Z1@HVb&nTawsb82(WkC7o`*xD7!Ssj8M_AZQQ^r@Kt*9dj z`*aGMAPTp)2w4vp`P#t!{dvFgUrWFB*hlv{E&+R*GYQuMq5|s&T7EMv_<=f&bB>7G zGP-FY&`gxDJQdwhQcSl8e%+N+!dav>$d zlx$Avg=}3h3MrCHCWfYDs()2o0Btow5=!<<=6HakXV<>+`Z~Ee=eRBAiA)m^V*{#T zRWkrj%&foobZ@MNp3JZl}oXO3P9Cyw%eq=6+q9#5$8Z4hU^Sh7CKX%vdfg=EHIM?L5 zefYfuDqd;l{AmN7aZ>6OoTJ!Wf^HW@B88tQY}yoFmFi~Sb*)Z8CRzhXcza?j>y*`V zWNNATW*W<4gm-#eq-ce79CIY z4aM#=veW~{bObJsg zs}!4ivd>t8Bp1W1ZBDYCP^jRXDO)H4bSW5X?{4z4V>;n>Gzh585OW}#l0~pbmtDT6 zz9=@FOuKo6s5_=u^Fe`Fkb$wO>i~UG0t{DS?yrCF<10T|{mM`5-{o8ZzEjI7(pHj4 zWV;52Z^rrLmD{#XOf?@v9Nq#AGG+v=uoaQ7SJ`lsQ8TZp>RXpiC6i<4xPjV+TE(7j z5>!Gk6-HSgklCe87YQd}c)~ktY7(NnO;y>J#9yJIZe&ncMwDG5fu1$=(Fs|hv3j|x2iPk&mQ$6gyl-#d?L(&Lfc~qpspw129O>j^v=Egr|00!y zBlH7VO!}qS_oee~X!-J2j&{Nf%*5;rN(Lu#IAr+d>g?M0-hFW+VuTwplmzI2m9bF9 zSC#AS5pVP5Sm#K!dKvSq(zKq-^&8u+zPc3wG*nKvaj*?pnRJ#A}`fvYlK2%38V~(rKdPxgm@81AKM(3owzD`PUfC0 zaWM9t{*xS+U?Z6A4;FQ>WN(>vuyYvyn_gTRBq#uCK>CrN`JIN4M>XT;X0=2 z4xt~HF=$CS0zQC*AQEWo=8G0#nqo)k$-UGA*jf z5%}_B^+@}#PDxoS(+yY2iP{4Di^IX2A8l52pBXqR03tFIxHX*n+JD?wy|(`A56s`; zfVmJ+r;T=dAW^VFY2Rb8jXaVvx(u5iZXw@4Ex&wzjF~837^z<-I3vZq4?tGv=YE~- z&yXB{QT&v)!`t$g%#`qR@=fg|+zyze=G!jJ%0?S*oseML|IKlc5Nd#bI<@@rTTFn? 
zxlUo4I9>!@$m7m{)BxI62usH!Xdn%Ib?8p~+~fCtq;Lfw0Jx?)qj4S}MIwEI1?oUa z8k;SaDYcU4d5f@KKK&kpeP&?u0SVl6SW45>Wtk{y>$6Liu{8jtnu%gh{>V`WGIB-b zpBG8u_8n|CYT)%QCqx*Q(K;nQvh^sFg^7tRa7e_OW|=)=(L&bv3&u?h#}zXcOdj1j z7RRsf+R3wDeC#6!24z_|U}z0Ux?5D>m?|58glWR2q&>2+t4YT;=UhbLzxiUmIn4sw zOE#4-8(2FsE$3R$-PtL?w(%=V=v*p-aVPW z?($NXZl!2A*A*%J)|?(wK1vDA*x~JSd>PolG|Nnc-)*-GT)`nWOppX3j-c%$DMuJR0L)TNFu2HrKl}HKt3S2=>5qSSHzG1Q2EYyy z_Q{@tZG^L+BJ2n5rlu`OcEOZVV{Z(|eACY`OA*{;k9(aU9F znMy&3L`E-LP4wjLL5Pdm;S@h2m9lob`UKK3%!=lin(P1)eWs|H$p{TjfIZ)+KxX8* z$_-0qnklb%_}ZJp3&ZoDd0_utI6x+HfQt+sD`9#mjB(rfIwMXS9{prIbues0jXpI)_DfT-)oo1)JX!&z&@HQ>eAR~; zpj~OLTaIBR3)JFqn;y+H02sIi{_f?$g%iua^TfRm%oc;P1|E=ej)XNSey>e-S`V?f zW-fN+CW-wy#(0ZdXn*x&3qOgIDPXYv?LFzB#X)AV?ze! zxQLl}8Vo%tr7a|taWHeY?xcw{sV`axf$gqo$T5LFI>D>@@%H4fsd`r(hCUde|O5huWl22&F zJAqS=xs~vr=5Q(ka@1llze1m2*hy$5$%Thh5P4z@&0{#flG(PTmmr^h%Ve} z5p^Rvf7}ypm`A)RDPXZ<`Me`k{HuJT~ z`(tolJyoB*e*Uu$&CgR=A_6#snh2b8UHQv&9X1)Ix&HHghvZ95dpN@KM$zwf2m zM|7jKre!oxx+P$Ni!#f!nR>jJSp1^*^V>fOV!oeou-(9OkA-P$&__T-A^@PKEsul@ z1t1gL!0H=sUHa;~RPhWT0U~VU?SlegVs_4re`P7>8uAKFwD}SskRKrZ+#Y!GXYaXh zmK@KF6&S^Ot`JT4bxB}VQ8#r`^$dGM?R_oVFzvqNx0v6-R5!R6S)#})Cpn0kR zisp%c7@=mYm}`b>L~A#ym7!m`He4Fg#p}bhAvb)VJUDhUgax>Q0c%Fj=zv-@Sk7MI z9ZTE9=|tm(RC@*t|IOZ;fLnG|<)Z&T=Gyyo)ux(MlS(RmpOA!v&?5+fh=@w3*l>}H zGg)7-&GA3pd$rd$zq9t9+2`y! 
zIaTEu-~LYRT6?Y8uQA6Q%|GgD8HWIvnY#{)C(3`>pSt*UbTfyXZfqVm|BUk;VS(<-x^0U;$)}?1GDgp^}+P zfg%J#1X#)iv=FCYk4(m$qZ1DuDz}eLZk_a96LC)&TJABJ)27*^51L?t(Ls;_1W`f+ zU=oOmdJHA&a|H51KqcedSo(ji-TRsgmOp9T;8HbUkzxk;q@N688CITWANgKnO30jJ z>h0`9eNpS(@k<#$nz_A+{pJIZl%3SvE0>3T@4Y9BEW3X_?;PGGNj<`a<}}MnrDKoa zv{V8U?%T(w?>#v2@WILZ$GGFr#QqX3YbrDZ3X~|g10~JLAfrS&peT?CJX}5n0u&so z8s_zJsH@$(be8Cayt5Yrk+i`B%*CZP_9G{cyyTPvh!& z=?5+_Ur3&X$e_UpZ3bKF`lP`QhK%OP#YdPYW z((c|(XW~#PMXDu#B>G*uM{nCdec%46d&c8!m#ZYFZ6hKSbJTq(hXrOpIE7w zvu2>jgtw9w{d^YoP(tzuw~#>+0ScfKEf;$XO7ex=~L_sr807S^AJneYV2?|VFo=&qQ%m4*SM=#MWQIF2@jgpcJ zga83XRwn>wbTk8|3g;S4{QP+*JmJJ8OCZo@g)-(I2|$w37&17Kv&y{Ge@dl(Gv&>H zBz*dP0nXVA!9()8%A!|!KTG|~5eqhO$0N-lJ#LAb(<@4aTV{ z24~nx{Ka<<>~6*9owQ<1)Oae~y&DKaVMt^8(7>mp>3>k`Cv~oIhn*@9we3jh~OCBOBF4z)6Q2V2)^@ z%fV8df5D*Uk%QjFxI+F>g@nRAZI^z5| zt&;SWc9UM~SPhLQUCF(?IrJG<^7j*=`mYle357T{9OH{NgqNJX`UHdkhZU3r(3nB` zlxxer2mkb2|1mN$bn*IgF5hs*=8@GS3M2qTV*>&p86biYPJtv`FlL(FBNlA$F8}@H zjp$<|nXky+OpW<7!F;zZ!vXSE*aEYpyoix5*353PfID;Al4IQ%&^zCst2;jpFsFbP zA!$Oqe`@;LZ3k}LF@Dz=CmTcVOA!c_L<=kkN-{GfL_nT&N)W^&eO^>;1>wB}IN_u>KJ#UNbnPcUaNpJMzw@e- z22Xgxx--9T)469YT|1~o1gQvsq^CS0v&LrcZ`!+sFQ1L$SUIChI!cZj{p)FRk-+&> zy}g5j=lzj;z!zKqGyTF+?>3lzy}m{ z5F99RdUNHIH=Om%lP_31vTT@*;3SO{)jl>^CUZcJFQ@lsuW??S*03^*8fwo(8V%|%=V^8`wO!coN&r^?(*M0gA^q^AfDaS~UDZeF6 zyADKNzx&`d4;{GWfQ@oM7>Y$pY5@T-Dg?QkKr|wqkc8S9(K;SNB?jwlR3B~4bWIM9 zUMS`Dv+_>Khr;E_tu$|%$5x@NEMNchlP`GYDd(+gE^7)D%nSn5tK2Y$d?P)k z$nHO9fTZ;Ae2d}ry829PneS=83?v_ok&gq&i!;5aD9I$f${42SVcKS$U1*-R0h05& z+vmU6*Q-M&7bu!JqPe+1IuN6e(e67m^`(dQe|zV|E)4;QZMnR1|C9)jard!j9MHwAb)CqCmvJGc`->DZ|vF?mz z7%*BvQH;StdTcUKy{N62f1`V36m!Vl(fcMe5B_icS{-^%@irf=&729`y~s2!@8h2r z6MEU(GfCQxSH1r{Y%sa+_2I2bFi|FlQ*v`42sRs8&YGv*xX{9*e&vxo zNG^b z?MFC29WTMNp!u7T3H#pLMD<;R*gwjJ~~k}1eFGJZ<@x3r0-uFe)huE%h)JP z1SkW$Fa<#bC6{)_v3Fhj$3FYNieKjQffHlNx*6H7dQ`uBO7lDt@& zk&OB|!DM1SyUkIA0t7%Hz*1hgT$}*!Z%uvqzCG7$o!r3@(RBD8b*zYK0~o4bi~@kv z2VWw9w9WF4;Z&8{!OT1vC=pb<#!iEfk;ElcMqN>D;AwLJ5@ZBG@94@RQV>BwhR!Eo 
zXS*kYBN8y0+*7CSBoZPNfB>A;82ulYZ8&9cpb0Th00D3jMEL~gB0_Y}!(%}xu0L_~1=9gvv z?h=osD|!CCn?FU({APkoZ2*1GYks#_colrge2-lzAalCP*AfKL2oQ>p0S92;8K3;> z-QP3tj*T`~w89X4%1B7*g3dycdHpOb31p>WryPI;qAIYvI4Q7wiGX@Y*{ZBd&&>N( zrEe6(S{o8umq3D;%?TKi1mv8jQ;tsn63A}57X=b7zrD5W55BeawU?~9c*RHqfdBx5 z0CqgJON4QW7f)Gx#`W{wq(JiA}qdXP?M$!!QcFGzF`)umpw%z!j_uqK&x>JAj z^vkbUd&UxPqMAIw93lX?d)12`BLLoU@;U1jvEdLLB$v#)DIzy?fuy`{Gz*B+`Ss2oaD`04ODBz*3iBZ&lnNooiK( z0EB8%s--v2C|N}HlM@EYon$wu8T356NlyR}K_@6D0TC&_f|(^o>HOfQCx8T%)173^ z8ZcAoB&1Qt2_?O2@5BPta75#pa_nc%KjA6sN0x$45~17nM`tvrP!cNRejoeu8~*cC z58bRrh{0e+5lMc0hX*i#q!lfmJ-p`mXJ7Hmlg?Yhq5(6qgcmG2x}0SI!1F6M7gexu zl=sg$;=sD|Z3^z4>lUf_Gp}(~`R!rR)h8XFe1e_X8Q9@KxliGS-J_qncjA_VWvf`C zqNJvQ00f5w2W23YnVs~a%-EI+LmHrzgb+L0@^?i;rnD`EH&SsC)Trng3m^4A@ zi2-f0)H0k+VwKuoYu78a2_%r|dy$4Lcb-AHXoqi^wm`PFhmm)0*}r#c@&%`?8L^Tu zsB@EKG8hGef`#Qa{A*Xd@a$C^-?QaQ6OdaV4s&fk+_B(L1W3gOZkgJ-<%WO%;4NQx z(dkco&Y732!GMg>Bgr1Tq;il0By$*|#{i`*U-$&Y{kM76Ie~Nd9(mQ+{OjU-(TUH8 zp01bU@O&Q*l8Yo9l#50>kW|;Ciw+=NOnGb*5pWqPNpG9e|9FTKr7SX$)>P=XyuQbbLI0-fBY&9 zHYGQaiflF=GMXTp`GRMng8Ii9eJmBNne)mK$4t$8PaToE@;tnlvxPGkQ@Wt^*q_u$ zJ~sNtoV%?gngmD=mMHhZzP)4YGxzVm^B~5X%d9AK?I-FbGewW-v7KspDAOd&8S!sL8PLwkneBiRjRAV+#SGJ(8qQ*wocst z>Pt>ON#3MM0!p%wAhj*T070~SPqI-op19`BKYiAJ`;%{d;;ONS3>3}ePNln%N3W7t zqR@k_J%4%Qr$2V3p#0FXfvX@|?KwqYXx0S*~38mB&R%NPIk{u>T7o$iu5SnYh~EvT~QI|K+23}J&U z`JwZ^w+%f0L<|lhC>Tj2`E^>dT`SNXT|+ z*`tq?c{xdTU566d8W9TY(qA*1+4nZczubcvk%kkS#Myc`R9uy)z7SH?yDV90!4z(Z z!@vK{UDxiOo)#!6?J4~5hn^ZL0t9e`13!P!5B$cZKeV>h3^GrEm|~1-ld0WD5`+*R z*1`AO^4ZsY^3OkV_qTU*5^yTcfd?Ha1$v(xd7-(}!n+}*Mqj#n@53=S-<|~zzMU0h zVBV~~@NE`E2rxm2qS2!i#_6|?l^?z1k?Z$rvN0$*AW#sIBLiS?hy)=DKIimsMN(Hp zO!osa0GPpnjp`_8xtQ1(VrN@RIyg0a+3IgiUk{Q}T4p|)ng9g{0YK^5qU{TnqLZ3g zCdFPl6%;BmS!Fc#Aao>|6A|u!Nz2OdS6{H=nd_Gf)0--crL)N%2#;3<5q8{AVNk40E04rl6RY2nUSO!fa!t8-Q&A| z`?`Pq+FetCdk#>9G2A zT4w2$l-DQUl~A3z=f^f5JuZQyOEpXjR)tr8^H3JuV5`ZB(ZeMHE+Lr?!6m1=xuQjL zIPHV?PyE%j_irsxqLrRJDngCi$WW{uT>gho{fQslcuBJq%jl51cTws>G=X4fM0h~i 
zz_%y2{l?Y*_~vhY?4I#QTJQ+T9Ri39r8CvL>Wnq&SNxy_g%DH$8kN^aw3_ZV*lbm_d zH^3lDRK3;t!JL)cAPFuU=8E21nU6|2WUEIh;$T# zpFh-i=eM>#Fl`Y;4@3Zh$wqA87cYCkuRQMgE4;~QQs+HDhkIl+YXum{7D|QDCO)<2 zj@Nwlz5lS~D`Vbrd#tquBp3NZFO-+wG8w^g)J+-Jhqur1OZ{gE5%tv zZrlFteT9lp0s;-{!d$wM)pPJl5}kk{NJ2`kwAl`U1Zl~2!UZ^7a04X}20*NIKQM%3 zRX6~UAkyVTSv%I3g1W7}>JiF;P_;76$J_pbO7EXlzeb4wt11F0DIV@DD$qJ3X8@gw zFc5$Vl0Z6UYOy`(3uUNeW>b}hET-?HwGGX#G~h0ZAmjo8W3#l)gZqB@k`3pNG+@mF zQ3xdR9`2GLjjMIjBlrE@H$L`Axi1z(6#xMeBqQOvKK-EnKs@5Z=UN?l%q^cJr0hueN zk5s4;>7s;DasZ?VcFaSfv~B7pqc}lG>fvPoQg{gxq3RO5U?vlgG~0|yyxapzIoMJf&{3fzgc}Z$s?Eo z_UOUrmy~Cf3l5HHm7tOF+MaJ?NS+qqyr+fRrueQKx4q-0eRpb!6Cj-kh$g`)gc--` zy18<;z36C7O@D%5>b70Ye75Z_*b}an+5n*rVIGKWs&&nvKm?-5BQNFk{*nNt71amU z5d{YYN`ONGL>&H3s8gHm|077j%s>K#42%{BT-qO+@4I#4gLmwjl;u}%svZ2Iw@|j=y-1~3Z{m^KQ11!M%m2Csb zMZ-Xq5v*UBp?s)tdT}xGtG5LUAeP_(w=Zlv@X@>WK4Qy2ue1bpxA$H$vULuim!T6y zM1w(XWlRP4pZf39NVNehU6KGv1|v-*K)9;vESO7eR0W};qz$68jS(%ejgbk_B@iZ2 zh$K5%IzrlAl9B_W(g6_=B!H#Mt1$#6!B7yE4x>W}m-pl+Ylq#==t?re>;mQ8O*@>% zaJ4r~2!f%a)Jm`RDTfLGWerYWxuo@)%T8VkqY-el1J0_7({UpiVq6=n(>7LW-@F$^zzbhRiYS#gCkM`A=T(oONs#W+Dm^tlF!`=-f;$h)(zz zvQAt7-B^tKok4T=Sr!CDM8U;!9K~G|t+(H}?frL*?<|%BqlMW zm$KwCOfXi?HU=0JAS0O?0Q0fCbzWJiWuyQYD6s6_yAmjQ*c&QJB@jVRi{VAO5(fYsmJU(y)Y!2g9S1d@%V;3u->dNuQ_vefurRrfti%_(^F?{Udy}$X* zUDuB_CmKaW3B`z_9u41nn1ZB^uEF`Vpy_QL+%#v{Cj|mlfm3z?Y{>K?jTuXJi_Q)R z^#>&fBsri&R8`s?5HJTF1Zk)?0ENR}x`n>CDQzUl3WPzV106Ax>Si{QJ!$8JiY$5M z67zs0L4ab5Mo^gg?!>CMeEt5LM<-fQk&ufZx}$MWfI-2mQLG(U{-&q?#49&Hd8CD= z%OIEl2OV_4rFfY2yHhTCWkiV-sR6Mx-7~%GH^2Cge{$2OcW4Z*5M_dldAYE@_KqNE zgrt7L%KFJ&oaxa5A`4iz0D*kx^Q$&*>Nzg$1pULSnUwGRC$zy^$(cSymPx75#)c*Hi@DxC~gM_eO`pWkG|8U2lJz}%$0Hm^NaMC|ZJDAuWj6hnytR^H0k^}>g#srWv zwf}H|P%2@1SuxHOEBgHk_<+2X{&g}SxUOq#m4RsPA%+yC^o z$vqr$MOEOA00j3$k*y;JsYo)9kmK&DFjYyTa3PJmQt!(^^^qh%w2SR>TCY?PKppk~ za+z88?gSu6CYj`tBv(S1L2NsR_5gNIbW&N=i%suof0$lxD|RSUuz9SVybUyv0tEma zm8&I;-U2v^;dkA<`{VZ>7>{zvsoMQHP2hP;HvaLme&!iVHiwq8bj28=A|$2OTz34V ztLwmEe9!$?|N2$`aQo<1iRLsxP<^tL5c;4#bQQ-r{!e<{@&~9qepa^;?_;2kJB+zD 
zbsuDp@ClV)+#BC?&5keb8%790QIH5HL`V|Ein0af|2y+bo9eA4Nx(>>8Le(xCYeAG zTYW8Mw2N$0%0?m8)xn+G>}2I0QvpH<0)*6KvR$KUGXhd1NY#9_IdiAVSac*v6d-j# zg=?2t1COh%4WZ(il8{#K?J8>gu`0In!k`fX>DGw0;{A7xzUR(e`=AmKKoG{XFTc=05 zbeGq+nfhxyw*Y!~uhM%}yeWO$1QP;iO2v7mE$n7hHnWaEbp{e7z}ka}`BZIFE0~Dr z^^G<~wrRH|{{J7TtkU0fvPFX=K<{BSGl_P~iaV?q#!9y%F-;Xv^Cu#RPUBA(UEQGW zs3-&g4WIHbJuxuysrx2Baoa;Jj|ABJ%S$<}X@fs?{WJx7z(8x4RY4`AgT50+J%Q}2Ffa)DmcOIBZuBt)?D>C?>Iz4BV=*nUUsvR0Y=H!a-T z4W=ln9j$B>9Hiht@~qN%H&x;}4>u`*PNdA*36P1N2#{ce1l8V8OCxv_d0h7BlAW|ENL+WtO_hL#Ie%9ysPzhx9{I)O$9>Uh9#)&qkUJ&OlU;n4i`MI}$ z{ok+JbC;R9g9fn+1`JenOctzcTcq`@4=RTmhzn~zVD8M@4j`LJ|UX;yv ztJ#VB09GljvzzN|Z8_=dD##%LG=KsGNY!3Nks=|8>>O>7$*smp)&uhiN7W{G?`XJA zAIuuQ>W2>?wOfh+4MB3UGZrb*8reC9E_Lk{K!9prk)u0N?P1P@Yb1?TWvECJXOfhu z8Ji<<@|i29-udLsPgy(E49+5S^!&+a5!3Gm7P4V$p0@sE3&8;x0fMY@Ii#~=pb2nM zKss5DMoZ>C#@657e$|`4`r-RpdnXX1TtJFqR1wKu_(^`l$Oq;1l8oqB)Biqvti@{fiS#8T17Ls=CP{FhYp_^G`hZZ$xjpVxBqbfu)nWhvqAh_EWd7Wc_NB%+ z$Qs@eQNcuK&cXKoQ962oY|8}cz*bB@m)xd7uC&oNoHO$4mz;81vuGL%6+xl*7qIS# zHLh~Y*v?=6>OcMCmajxm7GyyoFd+aL-EUQ%M9H8=8T>1c-0{CY{kJzA*gEaeA+g3c z`bK2S{I?Ix%HzQrN_WqUYzFZ^v*LSDmmj~zfb_o-1hjE%+%b*vTl-q?y=m7@4Mb}o zOuIKFIRPNWnXyC?L!z!V7Lr)^>LE$8&NS0GV*3&ybR#Fg8pEXwl}W%LRR|4^_K}4w zfG7zFj?{K-Y58?Y8GwTs(Arc*StqgE8|c-%xQMEpbZvXOMY(w(8CeP-DS84)nl`|y zyfh?bgj9%voIuyuY=bIfZ0~~Ilx(HW>K-Sm9=U@?0g`Bd4wn$kg9_3bHhpH8_=Sr% zp1FLWSp#&=2-N9a5l+G2EkV$p*2G6{|N4jT`o?4tqeYhyFa`)B2*_n51(NkQ!n?$B z8_E_{IF_sUAD8{;^EO>Pgb)m%0iYlQ)Rp=47zt1EzpTnA?*n4Jk7kd!rjm~HDBs?D zEx4&zEGyE5BwM!i6QdUTD@ z49krw-$GJtpXb)g3Lu}+RIU& z{QUM?U-S9*{q4Qij1CBZgJh?2H0$@>$p901fbnhL{b#dl*kYt^`o7s$3frJEARS<&FB*{g> zGRxKRcHf#pU{yz-(GpRsymFo2|sVreKc z$#Cj(B)}z$8fUzB^pW@8{Kc!c-O(B#jV_ddtP<@&1k-UgTE@s!C(q)2$F9gW!Y-s#v20f)I?LFav>C5C{r_2nanb&D>@{ zc5)FDG&5kgGG+>3<=ZZTv0~+FAJ@qsFm3t_&RgFvg@lw;fuwoNt*g9p(l`qZ9*CsI z^oGQ=f&*(<7hv|8&vjx<9p0Bt6S}200 zP*8ID6m;{!sXx2nk!?O)1^$yR(q#^W&W)@TH8jzgu#D(V01&(SCc0DuT}xt9C4#Ys z%|nPY7oQ6NgC!Ypty(7z&G0Takbu*Ya$?OM5b9W}W|m3OUb~*U7^>s3Oo{+fu+O>! 
zG*M74$SDwl(>CDYWl`K54`L8Pu+fKhZo(f`bGePkuIgbT6^nM zS~|P=g?l|V1<)!C|HfrM{)~<14ahA7fph`%WE}FQN|N)$M)GOa`)ApGV%X(5aM~(& zFC@+CjB^A%Qu`^V-x~AR^P#i(J(hA5ganME{NiqZ-%XF~3rlIS&HDug6O2wdP*f!l z5I_q^GQ+e#}MXQYjsQFS`7k|fZQ>uJBzk*5umY5}bfz}D$D z@$(n1zv6_Vutq@H)>h?4yUq?G5TaU)+a?dbf6G<>e$RDfp;p5RbkMM6O6~_T60-KO zm-}q$ZQuIvjtigt<7Yf+Lb-f(2v!oS_Ty-g;aem^|+#X z5h;2rlK=>t2}p-@K)lLmk0kDmRrrOvU`);kO)`pr5yt9b##^kO^*5Wi7!9z1Z^Zsz zJ%9LrT(oxmKxl}vLX09rBRH#LQ(2aGkL`KuH$MLRU;F4oI9N8M00qe~2T5qoM{S9f za#NytqeXn=fp5J1>W}QzghWO@mX^f~r|ySe)!F@mgo7nmK-~`uW>!bmmeix(f5B5I zbd1pH_jpw&L5zS=h^}4ozq@VMm$vz27$6ztAOY`efOV0CL=v8`T_Y4EfYHE?DT75p zfrJnZR+Z_j;>E}m_ru_*l+mHWqJh=8OkJm`*k#@g00FtWzaE|xZjTzFK3nv5aRxeD zg6~gAKzfsq_Lnkq0Z_Z0V)ea>QRtxS_jX^H*vYDT+j}kLoe#$jRlPPS!49(OehY~e zM36CvF`%i|MI)`(UbKGmzyOoJ3oU>UNlNU>^Z^MXC>L}nPJR5&YybAHFCUDpX#+(t zc78XV-6Z{vQbj~p6HaYy)T*N&R#F#*mAY(X!*4zHrJITqECgc&Aq2R32d|Vg2km(; z*2KA$Ml$njHwTR)>kA~w@;mvXgF?3{1NmF0j&&_8C~IG~8b*4EqC4-4@g3K1`@&XE zg=UhQPy`NtRBK`4*Iy$CB)R0ZAYpbx^EarSJf$meMfyFB0B3@bkjZVE*VN}hr@KAg zdzeq=^zD+MHjqz1^&vr`o~4~GIN+iuZniTe{TTocNJFw!Gb*$xgG%4cgUE^MBM|M# z4Hym9ps$SMKY7Z)n=U{3)WHE-G_#~+Laa(9*)0-5fPvG%b-N#U!x!KGr+0o~zb0D^ zS8$nk%dmC?-?bj3U;;D?Wf`Z5Z;fvK)i3O3y!> zKHa_dMpP=W&atn@HRaDekz{P$F&5u-)6TmmhFYNv-k4^r&|pl7@tui6EII&!oUyZ# z5J3l`CGxnuLN72d0tNw$gjMAm9HdJ#Q7{$7DkRt-LCDC|O0E!|n`nUmV4cs-smDvw zPC=D%lVC1edj3?$j{X>l;X#R$KETxTL^KytL6i_+NTZY!i7Xf-3UU$VRa2R^$Ds;L z+wzEvGo)ZCb-6+H*Nnl-Qw9#b?);N3T`}Y^Gf*yAA~PzDQYP8qWDlB9qTDk*_V?fY z>c2g3U0KAb!mV@x5GlqktcuEc7+cLe>qHOac_p4Cb4H7ZIwh=l+tXih!IE`e6a|b_ zx=8XKL6UmZUi9u!bIhR?)K4BZtGG&%`G^up$6ydZ|ISW|QQtk(`ajq2x?_6C!-QL- zWGk1wJ(Vn61gI`!ZmM#wBDDl6LC+V`Iikc$?cT%8ruhD>z3Q1RBv(}LSE_34=xwZ| zlswHwFsZbzzIO?2dpHFH(;cYFp2nKcRy!PY+j^6aAi*7E zXqK&KtSaAj#hKsZ{%RKy8EO)u=EShyb z0cZj8YDoeR3%bFm2YB$8zn5Nk|AX$4@Lp<{G~@3P(NU8;=2FCDYyf>2MzX+9dF&vP z$1?a&lnNjMETL~5n*Qr=ZQX9mTT&%6rag~P0F9I+dR0z}^KJ-|10pGuL;*0th$cX( ztYAQ0NVWq{Ec>da#*uFO-p$f$AOHhOkb;>xL3na!K=lZHrc@|E5|Q>LK^$tc#Y8tf 
zvjJw32y#Z-OlXt2syErqAf%Dcofcc#U#r9vYnw*|(1ps5Q^g&Vf=DLiuyP(U60Yhf z=YeT8B&*X>TDM$CNVG%+=ShoMh)zg~g{%~du*S#!+j%R#fBnh$d+D9a;07JCj$p<(BOHCqX9JPrqux zR)r`;i_karkH7oI-H&j2q*GMqJPM=$DQO3Rq{e#aEddC0h?RS1N`gade^2@jfRJVv zv@!z}Ruq_&+2-Kx4vJ(7Lx-gHJlKF4qzdwkGwcLRnq3`_Dn{dg+PD#xWc3n4m)M}QH`NgDpN_ShuL>Io)xe%lyn!(z3?#ggyH!5i%&jn zPz|&qnkI<(cPB}!;tnagPnix)j(_O(Z~W`s-<)WuY>@6oa~D7<*ENEIGEh$F6kjqY z-b6eaMbn;g?Dwzv=gBKy_`G$OHUR~&v*Rx$^@weg$4>8`p{F-zlqh+3l?Af_kgVIL zduO{xt;$T&oZSXo(?rpV@yiEV@4aFFBN%9)6l2K(fY--_FoB@ZHME^Fp^72`8Gs1J zDzz3s$=l(qbU4wzovCQ1jyh#2rk-7CHz@!xAcE!~DWfAMV`9kyo%vqLonYDqLjciM zk$ZjdNUEgrl-)ox)|*ljRHbPF$%--2I)@HeQnJ!sJJ>F|7UE0e%+1y#X{fdkfoh)s zNH+*T8sG|_p?|NYspGt)UQ*FK#G++Z5+{_SFFtkP$4*~2%ve|v1j1w`QAo-h$|b=> zmrD^UJzl%#{y)F|Gj|-?Sq^{zK_f+^v%sVa$vpP#{psrQ1R!aKn@JvWSc<98V*=fm zvibVQz3|6RxwJ5=R(0NAIrD3#wRt-F&jA_lO-aRkFw=}p=BT26QfN_OTJg*Kr~dN# zUHiil<5Y>JAXHLn%{RZ&ldubvaViuNL`rnXfSHj~OeVO8&i2q>9lojJclG&|3xJUT z1+`m3u-l?XYS@&VB%r!&O6~;H*-4GDv63#0QmD=gDYSC>w`)3=cHLfigwBv0XTUJD zTUZZ+V#je-{iJJvkWi%{Bk4))4w!;Ce!fk<=JNHIE*~mT8c4-R4`@OJyRKf6st#&W z3GJC2egBrP{`=k6P81~zNKqsi06<8h2I~u>4&>dPq@Lcd{y8igQdGbJw%&cyCxG%t zPrIUlA|LHs8=_om=ZQ@(h!C9L>gH$zKwe*VJW~_`R0`Je^6I^!nv@)kQ}nKoAni2pMUP2sePB2#S(Kq_&M2UQda_M;(vFl4vdk zQ;ab8P{grkoQVH^{`$3nX4Vmhr_09X6ufOc9*t91Z@u+B*MIJzxX+0Q!KlMG&nz~9 zI3?O%Oau&}x-BC168%#{VNxDPArHQm^BRILjm_6y^z4_M_JpPlP^AR|p~vLdBp2GZ zXW=nHi@9<9%)WX&XL>YJwdltP~Ej zrl+ngR&{e!U{9LO0uq2EV2vqE4cj(M14+6fNePw2WM=u%Bip-0``H=z!{%)(hYMza zI{~60f&d|)WkF&TBu1MKAfpF|*`byV6!PFLS)&`%ud}>vBO^%^Nwv){Mo^WFohjZ1 z+cLh|MAmj<3GFV|8G7MGvfLnXzQ$1m?IzX$Jp5JRm(4R44M1cbWp2c$%FCnX?d*y(OTFiAdz zP{Gy!aYka61V`AUNE(ne%jaJ|iIq0iAXR#sBpCo$oBBHBG9@uLj8&6Zb5tiu7eKm1 z7@Av)wR5`>VZ!FLx)FddM3btI7rwuZJx02yfmaw~P&%88#kf759LMPM1t(&+AP zuITBgx74uhW;LYhzQY~4>Bn`bQDAEpQ z&^{=EnzH!Uw|u^w_MbZEsfCOb%_P-^s|U}{1;Bq^NL^6Q3n(wpWu7{&bwO6D{L=oZ zcVGX=?qawBry6Bw5e9%p08F<538>Tvfa-E7oG_yjYo@ycU;@dVol8$q`s?%6i&j*K zsdEZhajc{|uT(J zTszbpl8lj%5pdrpDZ06t40Keas_z85*fY{mtpn9b5Fn>yWE2QefFKbN0V7@F>>bIg 
zzXe;vX#M2b!#}ub!w}_+KrkAFI^WMYx3s)<-L8AT2jhHCYhSAwTS5^jH<+My*Pvd_ zhG71U?h|Oef6LtA2?;jp`d)JRP&8k5=9LB6fX!$_AL(33HZ$bn~f^s>j1X(jF3YJLLW!q*+&``AsI{dUK z3uu!P(U*xd+AiK|-FLFh*V$P;N=gE0lE}~{<|C4pr2-X>dyYH9(%r)D0w+00nWFTv-#8X4Gs}lx2 z9BE8Olwb%DP34TvriVHZARV!@!9Gq5}eUOdWdvO`rM9)?3F0Eyf6xl{yQqs!Y*3W>qtArlL&X48t;WnLK*Gp4lOC zS$b+DCOBrqvRJ(SqUXNklq)S*5Mn^#5WDiR<5*_R`vRpST=ZXPM} z;BYFsUpv}**L6D%VyW45MCfT@k*xnzyUW+tifdm`(ut`r4ChdT?C4{CD2Tc-~XT(olF>@_Exv#b~{iUF_z zg?0p1Gz(q)rj6~;xJq-OTvQ5c$;#{GAX$YhAYC)69|*FrucEs!ym1Fh~v|N@Hq$;r^T6x8tDE?p85@2Qkij1V(4^NSlSqBS3bsJ!B z{!1~Eit(0%)O~_R<(BFGc_s#AbAZUKOKK0YT5LQlLk0oq8kh( zMImoAOVhd3rns%dAX#znPO)qT)W9ujD&RD~6bL2V0R%=M1Xm%qIJJ4HU$i`2xMt;9 zD~Fc`HdThuc1My(wmtC8x_E||nYkO$&dO^90cDqTdd1M0OaM)iXyy>BVWCM55Q3`C z68XRvNlrq5M5D7@kIA1p=Y(giTRIS|?H>@Cw1>2gZ=6Gn4*3{cx9{2US2un3hC>gx z8UzgjT`fa_DlCggjk?_wJ3`)HUXpr%OGy{QtQ5tW{32cNgc3AzmgJ~f@3`*c_5^$G zi5D78Mb}A@)Q3=5TU1APJa8r_c98)itr8$)+==oWcHvPU`ga0TfN=ZN)F0ivbEhwt zO*LX6lo6rM0c+b{5=JRONJx3J3YkCvNr`}Nspu(!qS%xEG@mjhP4eE+lP~rBnN@X3 znj4z8mc)tkmxsr#8NPho+OF-=0E|_-HZT4xG8`HNp-D=fDx1^337<)^-UJh z3GA%0T8M;0kN^luMvkgN=%le~ zk`E~w_4sqwY=3B~h_1p+)wA@fQ*u_PG^baRuoG2CQF(#1A&}r;Td*ps;%wX)YQeqI$Owt^URiYIMh*!XJQYac#;qQ-qU~LlI5p1 zgGF-_fa*n5UOGPF^813b#|+MHkv36;BpMfVH(U4=`$PnTyToE|E57U7JML@^0xh$K z1jzxWLkV&ga3T_EM0HW9Kq%pr{H^q4ZbbWDZwur7q{1gj%-q6LhzJ^I$hFLHfwuHO zG>9?=O$UQuQlWCd%d?i~Nhc0JVZ(-#8lf4SFu)w_eRWC7`;WUs!pxYoR%uhov;+cj zCkY`{brTR!N6GF{k1VM^R)tj34HSK&Mt|v|b(gMcHW3IqK|y$QNts|I2|YNz_uV)A z=jR^Y;wBVkB#`LY00Hcx^6DKXfh45-*)d;QeVz0ljh}^UM)jrPzSTy5|7-v9*6(}8 zg-cI15pc>u>n0u@$3X*m<$OUhZ^X~<=ly5FKMIdy4pJhfcbCPV-Lmz&Qw`Ayg_2Ho z7y;G1H9BiN+d6TImC2U|rEn;$aNLB768o@BfDAQ7b0WgmXXG$*cSi-PV+oi+E#GOS zgHFYRj-wY0k|=O&w&|1%UOn!7?&LhLP#`o0_QI3a7Y$=8sG%x-i+W0a zIbBYF^1++_<~v{9R_;}B0_OC9isS-8qu3R=o3wPayQ20==$k}`h(d+YQ>F28O2CW{2#`*wGUt&QL5yVX zio|x&QXS1dT@XfCX6h@fi$ZLGtIDybpD_J9=b!Mdr=Rlbvrjm8s90ib2mt`;1~5_( zLRw>g2hF5}D(qH0JzJR7lLJ>{o5@GkD>w?Q8M&d{|Aupif9cY-Cl$dcYQPNuC^&lZ 
zo7_3E|2Mz%kAHgoCwA(fhX!PkXj#XTN3QSntZ?&?ac`b-UFBa}OVp%|;AX%bt-Wvh z>W6k}0yK!3Li%avF&+I%N%_dHZWiS9%W(n+BzPNxc~X-fxqs|KcTG$e1A@G6`tF7# zjtaaJOs_iy5g;It)zGo5hB4A+_~z$VXMuEHLh6f|%#Lb^p6T?}v)#G`GjkF2L{cCU za)N+~AW*gtiUuavhw}S3F8RKbR<0S~0E+rb$f$|)X8FAFaI`Fc=Ih&cPBjG>k&uIM z2_(C@Ex06g$Sj5+z@e1EKtQ_pSHa$b2m%zVG;a+s8j4t+GaO%g;rg>jioyhu3;-p~ z$}_zc<2|E~eD=Yc|MQ;fcT62BY@27s%a)Fv(Ls z7f8*Hm|V}#J0Bf+o;Ye*h6Qf*H9Zcb#N? zrmE}psL{NtUG4x>3BrI}2Bzhk)NllsqlC%^rY{d@U(kLd|me;CP+|dGadyA!>y?w+f=;e@=fQC1QL(|oxR2K zplOmCvNK0k|M&Br{^1{d{hP0N=@V9;)x-d!nPSN(Vjvm-CivhI046ogyZSW0i_H3| z`e^rVRZ*9@hkk#WaK zhVLGn_=j6|Pc}y=08uc&B?mx; zj9YsBY}6`LC=AMqBK+XV7e8~;xwjwK{%?1F`wI`=JgzB)Fa=r)JS##u=$;;h_YNx% z%Fk@isiOKx9aDE`>D*Zh)}U;997<2-YMa^(qghSv%2;@u{oJ*PMUGiG@gaHiS|NMoMPp zfI6D2GwiYfi4<5UL{1M`ap{Up7hQhR&tCA%kKTFBXCAuYAX_bYCU0|}iIO^U!@L$n z()=AYNM;alt5&%pg0-Y~-Tc`NOV>VS-5Hb!W_sFV!;Xn4u$=gq#sSnfWHZaiZpWh1 zKhJ<%a%qp3e|YV#>&6;&rXz_k8r+=}7*G*?1}9K0?%r$g37NB3ZKGCYqKrgE)@eFs z_`D+_lUHTJLiMYGRXH;p$R&edh$LJP!;p_Yb!B+rS!>T(5)8p$Br^%^`T+#4e#*N8 z9ddv9*R~&Q1y`_IwlbH}xQ7{qo05IqBndJ}vSdu^`EBHYlAt`A(-{M8WjX%xbC&$z zNvlT;1`|XwpZ~A^Q|~Cej!5vXu{|HT<(f}#z5aleYVZ372qvS!QIV`#J9X-)<*(H! 
zk|}nOL+DP_92r(s7uD$m&;@uFQrdDUR)^h<82*!Iz3S{G>)Lb6`GYc()caX>UTw>n zWXSGbkW3}S{GjAbCC4;+s)SMZzyI#un@1a|>rOg2f*_E!?GRmvJC9xuS03yf;qhqO$Ub%MQMW?SneaXN8JOB>MERX(UReF@69CArL z;EO$KjVV`2t|X#B;^*sB5sk12#Uc)rIHNiJ+AB6(I1+-UOAb)Vi;x-JNH4NHNt01H zb7a9;$`>PkNJ8$XW zAOQxqK&t^FCDc)un$nEgJKmg$Bdth+L-j<7W>#J4U58NrYlIPm2$|Yatx$-OXqhWa zk4*XVHpI6*VZ((>NCvQ3h+^zmU52a99(bA2s9+6*Q-{|4_Tyggch7s>k8ZwvC(`m&oa=`0EON`ljz9YL|DDuSrTomyzN7^Ne(1ma zym>r>2AY$_BprWg(qfF?-q-r@U87T>kdsvYDRM3vy*Y=@Q4qUDCw0m#%*D3BwE`8bOMb!Kjdt@$1VV5BJ{{2T33)sNrZ?7~;^`>ubdk6!eY{JS)L!x%?SoanHl)@)ee~s1mUXTLs(ym#_n=8$o^UiCit%dXls?oGLjoEb~Ot{bf+DveDwl4)*(4t8=`G&cv(f)#_) zttYL;>&`!MZL^uSFu&#JQ@ZCL&%d74-$Ff61_rB{s``tqiPX&PhX)Me&Z5W7f zOZT20lDr!8Y%XJ~pn6bfWJYYR{4NcOHk1y~HLUP^SG?r=H(XROz+#2_G&eI<24Zsl z;=<}tS#wJZTy%Re%&gX{;|(21WKg-&@t#|D?2Lmj1jL8{cqV&PI%4jgk#>6;E}1BRBD2Y=$cl|Qs;#jpT!wz!yiRyao!U#0kxHxQn( z>hw#V{p!y@@SVTg^5qAo_qBpc5mF(#bub0zZ4aJdG<`Gc&fV$Xl9Zx+IAmZ56H$M1 z?I%xs_S%a_)>9xrvR|0xW0S0#(2+sONX4RhsL?SM8No@{NABEz%YkCb8UY3q*r6k7 zf7Acggv4IWFWsCyC=ihMI;5&TlAsyZ#ro`YRC)##QK3bX`A|7=`O5hAD>we^nXA_n z7C@1-Ia+$sK})}+>d5-_bX0p%nvs$q^^!TgzFh0kd^MYd0R|WuC=&<^=MNuz%M&;L z_{J5(R?2b~vUKD#d?crC?VU|mEtfpd7?#@551;z@_dfTvKXclXSG1axS^)seyIP(z z>;ExR94VnZc7>!Iz-L+-NSJ_V*v7GUe(j@=v_|F13~BzfT3xIy<0A|ykuN8IV9LAa z+HBF6GDClC5y^4X>lmM`5c&y_4%MfBZju7iT4v`qS78KE zoxF~EZy`~N9QNtc!|1PHyz+N0*>Ls{85}Nall5tlNL4i@`aQJt}cl_0jpPho|tH_5(9JMf)c?lz91IVF4 zbPGGmAFKPB%lU8|A}CkM*1Z$)cfP)3mj*$WNGtV9<0c+?SC}s&s`W<4_!b?uMb4p0jqfF@zwg z<7`NoY&N-hoL8}NXT(o@`GHAmhLQ-D)Z00@-IKX9?~*K(v*n zdD7~o4Z)1_svICWcTJirWqj2ud{E`*@4NA@Z~p9VjFm7WTc(ESQ1W=lC(p@<$yDe2 z4a~@)dDP+bir6jjh5;3M$NSSqWRc>M>F{fp|L_Y=etd%l3KV4vZUmMz9|`yCz6O$t zIhc>TdRa;RCMuhMq6AW*b*R;R&rJ{Rv=Nk;CIjN^_!N5$0Li25Ny(M|76%1EB%~V0 zuf1lG4Y6fz4UL^SIQ`1=HeR-}S;#FkyF?BPAdu-@zu>p|Rg_^IF1aKSy_(zgU`zGmibagJD9X@PYkaDH!38GWv|`L#0tvBde>9UwFrix1xU@OLH}lsjWXaE6%x zJNcm`yQyn?F{{k{j`T`?p))}kRY%~nv@SDq_xd;&?oJ!V_)AY+`ohgChYB?z&cc#iWi)`erbRkD9TDI^lrSJ z%TM|BoG+xyd>M_jO+%~R`s9~C`JwCo@`lgu4C7K{&pt>bbMCy!Xk<#OPi04(+HQVj 
z?edfPtwA*Ar&tV>3Sh4$-~Nq%eb=-9`wAN-Wwb{%7fH(4x-!5!^M{8xtCCaumZ1l7 z_q?={K}pOTF%}?dOo06A-D98MI_&}|QzM2@mlX04#B)RLl+-8EdOj#G6@QQ~ndRBd zskc6U!^=-!x!gDaf*~N|iWdO~T#EB>?Cq;GL6r-*q+FKfGzWtK?`m-+Xd=qkB+qG% zz3GY#Kfd{dCDsBhIGms?6Q^3y(WVp@Nr2RV;fGJU^lyIP=fD4ivj@G9RC~?meG>Dl zJ^vHQel%|cBMEqfe`jpl-`)77X?ca5I=Ah)j}W=co%2hKG4^S|?A<$6QvRv=+5^aT zER|ULJ=5jye)Hj77@3SBDNulrP||YMVajXd2Dyx+5TVMS#ME^F6RaTbVSQVLM=yec zb(j&a9!!zR1eyf63!)PN#F9AvoRgYAeb%~_4G^Skz5%@YpUTYn;DKrV%vT>QZNN&v zib}{Ayfez9yt9NzIn6}T1KlH*%AdQj@$xfPFK<}W06;KgsJ|ml{SBHYM4B|w+(c}B z{JtCCcjKpaH>aXIV1*C^1W+)r<7!L~TU2fV^p39}CWncn@9b`Gr~};gXt#i;M~%4D z8*hH%%bv0BykMcS69UYf@G>ke10}Ek`D8MSt#=#1oN+?>4~Xbr?IXx3lT6%4y#G6o z?C>SiM$$WCv#=i1kq5{VkS3E701^2Xrg>#Rvd2LaKu`dC8w8>hO|S@WFc1?P4X?g% z&6Cy)4JZmtib5(TFT5Z0Y&}h*B2}eH^K@Q1Q`b%13U@~|XP}*E6aVdkHP2W(Qozg| zGLeAt@R0em<40BEJPK|YM)8uh6&zkTP>!==UP!h#qC0U${BUgoSZpV>v_ zK<1+;BoVoLdUqBIvO`wB^V(0G^_&eS53P!n0TsM>MmMnFcFq1R3I)kI8)WJVyE&li z^)KUeGsPsH9uyrVr;ML{aL;uIJz5Bwin3g8FR3$PL5&C)n0}F~&eJO%k#<^gl07=S zry5VMftaT^%oriT3&erauUuuXy>P?orUwSVAeM|l*3bSruM54G=n8j{45X|=V^#8{ zc82H%#ca_LqJ^bpykxn5FOAp6!Jz^r37ClxMWyS+>gDIvce8?f>pur!zIGXc1A&3z z+$HPY_3T&u<;|b_w|l;6Y75|PoO9*pU7EA;Uzs4sv@cSpuCvpY)O?)-)JEaTXI=k7aob)_?yni`L7bdv5NrBuPFteaq*?b%T?UynSr^BX>@W z7Y*|=%2H{fj~B0!{TGVzV;%r2gaMU5b;i)IUAk%QfE6f$5G@i4vVI_?<{-$Y`te-A zUC$(lRdN;xa8iKsjj#V2#DR#l@!(INIrzrQH*6XjXwYR103tyE<&4<7&dj$)Gb%cI zV8TQ=8AKp1FNS~Rk{7)BNiSOw3v)z++9gunrTizjW1=%ZCdKBq_kE_V+RA3zrT%>@p3AK_@FJX(E>@z&ApZ~fZ8-8QyeK`pZe z7%4(T=$5}MDa7jW2R*75ls~~#$Cj#6IDkCHl-T2DDH4EA#L#=5{n|4IR)e6iK4`uB zeI0FRX?6Z4$@($J9V-biU2(kBzuvp|_R&W5le)7ilAXm>N$v!onS>gIK#Wmh`-8MS z)|PWD9m7h%VQ0UB`hUBnt7QyDEG1Df$SxQjee)AHJZZ&H5ekyM_$u@ge?Jc{4RZ#Z z@QQ=fO@cF^vn2^85~PDA%iyiTixEsbXM?}_%1sw7Z3J>|t(BBPGB}E~{|QM-Swjfn zyd@|8>9c?KM>btzr5U|M3Axu1HQJ;s)$5&=iLqPz>o3I|mSQ5$9lqxgpZv?~K6QxG zEi;@TdF6G$-S=9v>NCFKqtg*3QxoFKVoVDqM0%i>v@?PZK z(2dfnQ&K{Yr3^I31GL#f#o}`HNPy8UevXbLDw!-Ze81(UCcEc9Tf*486`1 zGGQf>B>4YFk^0)MJ3jxwO-`l5(z^`Q?^{#bXy^6tXT2}V%n(RAnvVi|yzzlsw(l!W 
zFqFw_(YZ&N&CEap39w6+HEG7MkT4eAzpn@v778goZQ1B^@bH)fD#*)u0O;DPE00(t7YjA_*1Xq`Vmr1GQF%e*SUmpS*IUFgt3KTJ+;FEQx}E zNZovJ@|VB0t5pmKj{sO16lLjpa0W=FMM#ueXsnu^{IRn}UbuPHk|G8-(9ntUX$<I>cM{YRqU}*@jk`90fbr}eNDAFX5){7%a{i;I(Or7m3mUNa% zgaGE&YF+EgF3Fh{V69k$;`wXN{*5bNGGc{^qOms1$BI5YAIxmCb>7I_nclPTagX#! zYgxQ?y!EmB4~{hp5Rd@^1Pbh&fUtK-gyfvn9lab;>!IZC-c2_uOw*f=QYufg$=|+w z(^FRt6?Eii;f2<|FmT#l`B5$aAk?*F4+@wjTP7MUz|L<>{nnFKzx34Q%Q)o-KseSD z+x1U%8jCk{-#;qsH#Hoy=hajq}Fqy_Qd z>OChd1%QyE02*jy5Msv?7z3f{Ml9S%%Bkm{(tO!z8&4=?W>jd%U=gzXm{eaf(PXKg z*nDh(`P_V&NVg1SoLhp63`?6Mzy0JFZ``ux1Gj&95*`r<-MNXWAYPae$AJ`=nw z0kzQX9!_joxGHzAE;<2dz}ceSeZ!|NUc33^=8Do_0?E8zO|P?dG?C2;Q+sXz>m(n0 z$%`Sf1)7P@5)j=`Bh@HlFrV)c zla$vl&kH&~DZ=1O$ycBM%wN0Yc}q((&xwE;^SIj7@ny+mPMvL(oTNFz`V$nU=q~NV z*x%gz#VH8vECrg`vFi(ziv!5xljMWoyP@|zc8Q@00Hynbt@s;XeefYJfja@#bdKx} zn%1j8k-s4=Kdp!^38Vm|&Pa(=WvQ6i#pohB6u@X6f=@ngb$HFi>y{#P$EkO~sz&DB zHWozQpKb7$fKq<-{_!_nw^ue~?WktLArLB@Wr4!whJkY8@>P1xxhJg~D9kK#;>o`P z^MQ^xCBtf&u#tA?|$neV^*qQ;VeU8D$_$PX;O~mIRqF4(j>0{ zYn>oKA?>o6VV0SbMF${fW*DvAyV@Zj<%e-7Hs1BrpSfbq+06h7kq8bL>obz!BnxOK z$matfS!C|ZBVc6s;DV_U62Y$oKYI5g+kGeogmxGgGpH4_2Rg1sGZXqDsjGm4nPg=3 z6VvNvoyT7d0aP3;Ctq+UvRoQZ|M7?(08u-VdCzQiBx8#Cx!Zd0YTa z*qe801r%T8lenhrERSR&X~BCgfPkb3+;M2~i`x!1xFVt@27-`4rd{)NDb&_S=JcJe z!bBBQ8^+m4TdY88BuD@ynNi)bFr8%(22{TMjKLS5wt7efnb%ga4S1Vz);hMRtr?bp z6b@Ph13?U`7(H)j;+5xaIB&^_SrnLdHgESLd8A_9`$_AO-&7v6?Kv6^2yV|~IMrNtdj>D`bSQIpnXc=krMMj->Pv zI^vBJ+dh8RH(q+yQN6`3#{9-i29op--g?!Ju>&m#fLKGUI=U(E$4Gad6@qy$$S~>6 zkFoApj*{hP9z68!I}eq#5(Zg`C{;|=C6NI@i3l}eQz06v!+{{y0mTW3)-@NV+R9A> zQBm63q|Fhr^Xdl(pyr|LNxDimnnNrlTN)>R;?$9soVK!QW+>)>=X&hQ{*owwD50x% zkG=7RgJvcmKrR9tKx+dhUVXuuC$C)^ELfkX)%+qq9x2auOF4iWcHjT&U;T&OoNO@$ z1ZOam$%8~^ntT2U)e;n@_$hU8T$d^?$DGcmda2bh0{r>wF8IACzHAsxV-zfTHlG}2 z;oq`6kWn$`6>u~HTmZ6i=M?|#zCF%Cp&f6PX5VG|P6=S}mSiQpXlFwdJ3)OyB)tzE zvC<>3F}J?+R+&hN0k%Zti#IQS>FG-wXaqs*BhB`*;0g%0krzDR7ETFj^F!Lv%K7lOqgPX6^-9B;mo*m_Ii{@YlfUu-Z5k+a! 
zYr?om`QQ|C=6SP=-iDI$XhIpB6K@|m)IGVTEYey7YQ6mQ!Iz%CtU(hgI^b=@T%U)v za+H!PTQ3w$P;|ysapGrBE`Ia!(>D(f$c(gTWiLB)PU&}a(|oA-^198EFiFxC%Qyd@ zr~mXx8a7v?*D#3Yo=;}B5t?a0WnRp$y;)v;xKz>7Dp9byPTpny>ELvjH;b{1?k0|X1G zVICBN;yE_{8;{@c<7cc}rf88xgacGSj=IiTA8hwH4-&qA$1;@~7^?eSf*47lTpP!K|H&txG1&O$2OjyrovpG_1O~xPo&61zM>UZW zJ!4Jn6B0EJU?e06&cq^50VW4Pn%Y=YRbGx@LX#6ev9bAzb5Eqa!A38~-s4jFlS*@6 z-97rY@9Z0-U%9IInX^})&>S3)KnE@DY5#ivyr=wsDf567(T1uj70q?k&fDL3^#}Jg z+{`F<13cB0s~sQE)5`4VqBA3YnD9hL7drAN7bS;55J0*}642q*0kTJSOq!ortbX5f zUca&!qDKUSj8G4FPz&a3mrwBJ?$&>s7SI|_QgBC*JSyI>fBgDKrb@}dw8N|xoe)GI zpwzN*>c3yS_S~VOQM8_a%G%S1ry>I&%Otz#BGhD}gmTE~?I5q}0~i1r$Qk933ML!C z7 zo{3clgG}U;*Pr!USN_;C4~k_3q|m=2OX|g~nSgbBB$L5SQt!#{L|D|llY2jL@Ac80 zG(iT>M~@%n=S7mm93l_voNiuIMCDs(f(uaGtKuVf?mSQ|p}PpAEL*D9ix6U%Dx{Ze zUiyr+OATIlSVjM_v)4R))1HaO2n8B5OdM5widiQ>V3#^dG~KYstF{rUj0kQtGXp>} z!YoXN$mg!c&z`@wNx=+=l4c9-g5!nK(5-Vu27l|ylQ$0s!3vb1i6F@y4&!|=?#v^{ zKaqy&P;G&t!Jd86MPudEJ8t~Mq~wC5&Tu)0`AuI#Asz5uVi-k(AV8!M6&JEPz(YoW z3*dBNMX7(f?W(75K7Vsq4pI=nJW9fXd(ET`sQ1e<5A?WKFUVP;>WssjK1si3$G-0# z3`A*wplj8zz3j}Qn)nc2#te0wXr@tXVY^I=I42+)$?emc}JcZDLb zb(W3)?vqYlLohU|Lt>2j_Sn?#U9)|xF$5X_h)Qt7XZYijkU$VHCAohk*4>t!Km~CQ zz$A%6hGc+(%;hSm3mON0=kn9m4urr0H9GFS{(DTgT3@T(03H@a_&zM9xf6LAeUj7f z_~ys{b^8smphc50$Q=fxRJ~%JQJU8qqq>c)2GE<%cqCGyz}oI2#ok?hNv>$w-~7Pq z&t9^QE*Pr^Gufe!-h2SFaFSg(%9>ehium$F2OpR!+Cgoa>8t7mRf4gBsgr!{S1#MM zx&XsKx*MB?$1NFr+WG;woB%=6luMULQdx{f0cj+e6C%7?yUi}PSv6RdFknE z1{=o4JXmXvD@tdl?u56<;`^t(zB?EP8GZngKyAO_iswIh`RR=o0YHjGM-sZGx$YIC zSE@gb3=tsbfXR{Gw$I8BZu#nzw<0@a5+r%_IAQ0@y{TO%{*eboR9m-@U6GLjYW))z%?1lCq@EI`m#7>q-(Q#Tx&Y zY8_NnU@!iVBDd|?b>+sh)(tMtdkW+OLVM49TD!kr!3b?K^hWb%(e5+l<|O!)hYmbk z4mhC0G_zp}25Myb&`+JU^zkc46i5=rwBA4uYMj*63r|^Y(dlq0n(D$udL_?dVjHJY zfB+{zs5gI-Xa&G!twXQ5V8z8NhG;O9&Vb{W>h4l~6?L3apMLc7ozc@xA`M|(vF!Jr z@{)}lLUftcYUh#ZGC6Ct9+^K_JSu_^!2`|s!7W!$DrOdHCKK7wa}HpPaaJdw{BY;j z#}PaP2vKg4*w)hj`ucrar`0} zL8%=~%S_@IlATAscD8c}q$#<=xTL(MPc?|?$S@-%BxOP| z9YSHaa(Lq9=dKwfNOlR24n$}I%NzDn=dEv)Qjcf;uHXf 
zKf>^z_wJdpK|+KfAyOqEs1aM6Z2C2qp1RZuAarX(TR)Eh=#sSqSFT_grlMJ-oT}$i z=yrINQ2+(H@{B|ExdG%7eb`%PG{;|a$w|us$t;ZRaZ0n*R@c#@NY<5dN3{|4F()Q8 zA51C;42>6@{`eo;e7Q+e!37Uedb7KCO#UvZqsRq`Bm@GfE=jCDv(CVpXGe9nREQ8_ z16S?1?YpDf9V7^LjTy>apMQ;Lo@{Oh>RF+49Py>*BA^m}&DOmSPdA!!p$HNZlB;F% zICTQbUwquUjfKICQda{YO97w>SO&iI{1aET#%OXtfNB^I-DC%G?wn4bQ_|ErmcMZE z`jd*n(EylKrMZnfj$cZ9dd&4beU*;Cb67MOILyW`Ui!SpFJ0ddoO1SuB(JtcR+)SN zS<7Yh(_9&=TUodRm2hZB;fE~GDb+6+p3<2_1sH7fG)^39k2JV;>;@GQWBiI zy!GqXY`<%|NXFEv?*bw)QXc%33)lX@hGijy)GIubE@TzFl1zQ+ES z;<47FJk4(!xV*Q>-Q&Ao|GD>UwNVHTR)I&5NlNwatT)W-r(OaSU4SGQg`UU+fD}Pb zSdFK1{pgWTuBxX?%kZA(zV70s8&QLK&nKFG%)s08W!i!T@wch|{ZTvMfHQvkk@1Ho zz1tW?j{|Y?$2JW=eZ!KBO}}GxL1CE}KYZqfQ*bD-;Uw6KB+2>_HHZ2wuRe0oMS+4< zk_eGD+gcpp_zN~Rp0av*^@c2rdLch|Swe>Tiwm8Py6VN|YoCwm=Cxn>@{eG>`4Uba zJmG&_{zHRP5HtFaT#Bw7M|1U~Hpw&A{1}d!fAqGmw_+<=PRZ5>9_oC7OOu?52Aa1k z=5hP-F1;j=q)g=Tpzyoj+8` z1;_;!|NHkz=)acC^?9msVO^f)RnmWqN3t%o#W?+roBs1d4}3GG(X$9MfInGpn5zp6 zT!ayok9AUek0*e3y^&<^DX^%tZ9GBE4?OR6rwpyOHgnH#m`r3q{d+dJeJ*MD>+@1b znuEc9B6q70HTBv zE(LR~!GYg?(#DI1ha17^g)C`a-a6mr(JndV(k)}tzxlPTW5oy&fCxg8*k091!E-fr=jlb&jp&vP8^&lIN0QSM7j|WDauXk}DTBuV<-bRuiMla)c4()jT z7yoK^m?8iKK&-5t&dwSkp(D}`$tut?qNiyhU?2cXBEB^5Z z-mo@|&`3cBf_drwv6QGXXOW`%AwSK%sO081*hIk%gwuJ?c&^f?rckg|U^O%dX z%8p-m`(?hI2oEx068x(z+di?2f?O^U0AUh@ShOZzdDfB_oqFOBTLuHnM5i(izmhV` z*X7$vz45BM9wbNkVX+lXTSy@ean+iJ$1wkMg@a3`%tfW z*e7~g%JavFNj{caPJLC+??}#nzf$Tk6u+Oka{BI4GJ$m9pr(KKDgX>cdR4LjN;11>VBKFn`_(JeP+) zc_zbYkL-GIcRPA?zWEwuB3uzWrlzjhI$@BjfS4=BzJ#Q50p%}-#o?8O1;M15GD%pyGD22u=73`nBg-SY4|aZ z=8;sz0rZ72@Tf|p{8K$2Q9)svt$(_6?+3TZ-J={NP!@4$x|mubUeKKUwI`mkIt0Lh z&~ZC$k0>NC+5aVWeiH})qwa6M=I-lS%jt2NCb&e%Bms0Pbu;KlX?_BeMS>`$VgT{- zk@4TT^0Z;VXvYwEfTTqj{Y9F~Ny}Ri_Obh}d&{-|5*q_)kBc|KN8#TU&nrnKM$f z+LX_jnwQGcUn4!54mz&hIp!{agh_w~V^IzZ8!`F|mz}gah#**W8wK-7kMIb@_S`F% zEeH;9gm~En8bL*4>h-nCOMI1Ych9jXQ*3%RHv9eVK@D>p8cj84rt zP~|ZQQ%ZlGkBz5WT}L+Us4_I4ThY7+%_a0$_AXE9o#}!hf`u@^;(uQ8!WChl1qVUz zY-_ETqx1{ll8)Tk$S>@A2PIS%i1ETdb?0@_qq+B@(y5P9EsS7ZHj$ 
zUZkpo>Q|1e2+uBbWCycbrs$N8>@eng*T2Y|ucddkD`7jQFv=HV@SeZp;y?x`i@AYEuUe*1ARc+sg>1XxE{Gf5e{>3k#>&g*jQv2i%f|2LOrOOW71 z%k;%<2M$_jfl@G)kcNdPEX6C%-#CCEW)Zv@;EZ1G{`afK{_e9wvgJDt-Tn3tghLY$ z9qJ{^pb^jCu=?yJqv+v8mcb1TUKE;s{-$MTE*YvN$s?deaQP#yyI-^l%-?b%Ip<}z z#ELgO;fIz_g=mi{k}JtRv06#if@}rq)A!yq9b3J~`x!>@m@T;?zXjEQ?PgQ%2}6UN z)$3FRb(GK{x36yZ}RkF2=tf_V1&hSg@G-c!clcHxTVbJh*~+au{ww1-H=q_e|u@?{rpSZkC(A|V3F zv9uE{rn+6E9=3sFX&xJFyz1g-e&yj?c6eEJf&DPPI)_RBdS;aRQ+Uutx#$5tQK$cfF~>;Y*_sD3H_yQgAURx78l(Nu4O_o(08)bvGA?b6|IbTSf8UAA z2F&VJkW^yBEj{}79e2Lw50-pqYY{+FM06)eXy4P$KmXmog^^}p5J_}BrZ^D1oF;cn zm9M+%f$d>w)9DtdXaFE1NL5h<4pmJOU@$P!?I}yf-uOMoe-eO!vr^)dTx3{_l=t;M zhGy9%K&8rmx#OGv=hjb0(j+&fr};EejEAoi5Gat6oB&cjW*tS$G%avdayA)>=i6To zOAd}mJB>^K?s;!m(HJDn#9r7b$vI@>=wEed=#MfU6#a_{B0-38=XAVoPn4P_ceaM( zTf+p;5A08KQ|Xd#lI4SeIStsnlJq5%fcv&Th% zg}7>9;H77-9BfUyAt(|6K`PTt3MEjwR`~c!&RsVG5Dd_wbi7E%`avq6G$P+5Cqy<7 ze(=moFBn-L4Nh10h+n}42VL;qvr6OvbdOY2wjcmf+}=9$?Hza1Wg?I6Ox?wdAzQ6hRl~287 zWnitb2(n5P(61gmy-uoK_^cz&2suUAWZ|EF;D)DdJZD%1O%PM=H!DQ<{Db-ZHN(0q zvTly9{ag9fraq!H<>Z9IHy$2sg=Vy3fPUd%`Reme95NbPRHG}@H9(-0_Wt|V9u$0FwoV5DW9dmQA$o{=@faYTP_}Cal5bgd2dk9Qeua#RgAL1+UDZfACoqCs#IZrXY8BU7W1kdzPE>*rXIzcW);Up9ce4ogxVEU4r5+vOfF z!CObCfBBk+4ulm=wbqBxH(jy$%pq@tB7g)0OVunOgi863gV%3);I;qfgwYm&2uYEy zwznlBQB7g1sp2(1y6H7PUL;n4jHh#yPdNYRrhBg1GmOH7Xb6!`rMDR>k~t?|zNYcY z3)hX9^=RxRxu`2&v~3&{nFIPSEk^(D{)hhCm)<)OO2Imt31zI_ah7P4BPFX`{v%ZB z{Xt5qyV)7p#gyXcL?TimP#Ko1HaANA&x@b`ic_yNqrm#UD5?M8p8f+kivDfi>0dH; z*pW0lEe8qlo7?tJhvmV_<>kbGJAd6-LnuOERa*Lx5p4@BrHKc(KlJN=z3PyQh!BY? 
zg@iQI1FTu{;QK#4_T-C~U3dmzdGKlgK^QFf6X&0@<;!>PY_4hw5z$l#-Z7}erS%&7 zfiuq-gjY;|k@YWXV-`2d>TdwfT)F-SH(dDfoj1pB$Y^x+2VRLTQ1&$oa9Vg0L-Z(lgIpC5@-p!Om zvVhJSupd8b<a>YvYQO1`Arz%TlPuaFLrV(y_h6oJvhE&f+IHd{cHWlHlNT8FqV=av17m&A@{P?y6YVuT6*JsP4{Vd6|9v(n4trR zOYYu&&!2y!)tatgf--$G3c-Hx^mS(p#~5fZH&04#0|HT?R%7&s&R9D@Lgiu%>LM*} z8+9UN;u%A0o^#4Y?%pLIfCEl~tghF-JhmN?P^U6rvWQnbxJ4uumhxz(?%qGrL+0F+ zF$tv6868<(34P$UNB(2i^tsLQS1;eVELanAfedMO9=0*QOO-@n)A7E)`PlS({_il1 z(My8>svTWIhad+50co=>${eE&obVUFy85XXsxV{dCGq_=yAQtYJ9{TNIPK1){n1c3 z05lO_vJpRf?uG^mKm=zy+KcLiMLL!=`*{g8e^b$A4aCi zPzsPB8kw33Tm%dxK#(8=dAnR8L@a+`_r344%6w zGFdAoVLOu-yB01sPXPMMk^C|nMby+{_Bfljm zp}A!U_|Db?KYH$(b5{+K4Z%f%B$!TQ#r?}xEbrg;;BUTX_5Nu&ONy@G@T7K0l-l7V zB)|&tA20`jmMIAFKfbpAbKmF@EtM!Xjqpb|E`i37P$@+Eh{|VeUI_(&g<5pY!H0K| z=`Yez4!No}rQb6GP5p5vDS{Mn=J1*)Y&wf_g5BzZ3jmZ$ATx(!N~ZlvD4&lZl933| z*S6g;?NI;#Wula`pvd!=K8P{KsssH*ngMgsyLB#HaBAA)7w>)enWvvJ#3aFQZOs4{ zA&X^e_kr7g>76USac`h0Pz7LgN)!PCCZnJv-+us;V(IRZBt@YK?L2YhoPU1X;7KQ1 z5CK_X=z?qt0P&HQe)&sx-)GBXXi)`Rk!&bFe^vR57jGW1+z5AcN?TMnFVZm=$B@o! 
zzU2Jl(Ocixd+-1J{NGLtv6M(72^qWXLun4-q z$SIlNogNo}NKuZliHF|)Pb1&B(+nO2${~kHvbU^2sgG))$+d3##Jz9*fF>qg63x8( z(GpH@h4E$Qu0lEPYCz_MMJ;VjKKry2hv4W!X4X3ziKInO`9DZX}YkI8vMe|JO2GE0!4G{x|umt0!F{`q@|Bvp=K=QauAG{tPW=^8-RIc3uK!f zby<*H%xAJl3t+14dr{TF-SS}pksM_6N6&py5v!Ljmnph#0VbINACo64ehEM~q0qDa ztv3u+crgiMG+wjwjwzG`Do^xY(ofFckuDt*%UiVbTS_`%IN~Ij3c3g>ymv>4q`FHh&LVoDu!Qy#=dB<12_V8b z;8Q$@c5gbce83IBk133V9H%sJNdN=X1iVZ@NBF3RA z$iS0ltvi?~VR8X95N$+gu|lOmGvW`geCcyGoNov)=947WFXmCy^7N>QTdbcPG<~>R z^{{Ainle!WTo3NL?>GN?)y}c9VIpA$k{|(+d6t@nlZ%JH(+Cm<5CTMzap;?OJoL|> z7bHLg1naVhNiLKB&^afr*VKql{qU))hUqdRLoZU#9L`~jenj&v!%A9|-!s+%%oouq zBc5^kMJHg$q>`d?8O3L{GZ|Q^jOw22bJMnW^L2FN*LU9CqJufq9BSJ=!Pyw z2!s)koP^{es^<#F$&D2seEu6&Xh6X-=n0bQKV_0!_W$Bo|Ldb}dGE!Gwti@sh43q( zRo?sO9~r*tcK0%n9sn~`i$l4SX8Mx|mt;(D7IC;JQ-lpfEE}D?`z`M)$EM^GA$E1< zjpCq+7j0hi(`TR9w31dBKzfG7)L)oID{lW8loqY!3(`%JN9sIk66A#y&pqq%;4B;< z^XBE=V{2If$V*5y^Dg$?pAH4U#BQ0hoqF3YmY%KxZm~g-O0D6a1P4CQ# z4EYlSKtTcwuH7H~?7lbsU9%+RAjRCAOWn*2-uZfE_8~H@p}OMoE?B zpf6+(lHD@0E&9tMe2L*9Dbm;UfXBC>cTco$;-~G8|5y+n^iuqeE zk~03Fxh8fbN)W*)jb8Viy?^xKAwh7M@zM8H&Wd}oZ3QCW1OzjKYybN{edx*ymR@#F z85qEBimZMNjMbW0R8}mYVL5aCm{z9cnR4$EeV#-0ezC{bWSGSdZhqX2H$EVfvfd(H zyIFt5kkHKQQZ|LHlz>sE1a*#lB4xOF*Msq_*a(gGwO%WCOnD3v(3` zAYtn6hwu6Mx2)KDFv2UvJ(XYUhBn1ywWY~h0%}P(iOT9rz+48~dCIEuKKQ1_$|V*G z002>Yut%3CcTex{{{^1!%^zsQF5}*5?+LcPq+|WJZ#$ zv-MKiqvolyqbZom=bcEeY*_IukfO zeg9wlD?f8n0ZMZD?B$?-PYI$*)5;0o`j!ut5ARj$$ zbofJ8{b*Kygdj|@!4tL~y6bHpz{C`y2hGl&lTW44>sFK{^hf!+=9N{cJIyQWSB;Kf z6&b+f`rGu__by*){pS3$bwubn^RLub+r*`8K7GTv1wcY@LXvV1vU8KFmgoF6>i5*f z&E2ajg3>ONtWQ0FE&H||@M%bb0Osc|9lbtRW=0KT^`GZlO&&X2(j0bvNioLJZ{D%% zE&n(&?FL06zb;A4fgWwU5&$aIt&igWPU-^~1=Ip2c{JGy3LNfr$GQ;$zJ|LTN^CihhW?cC5jmrbcGNC+% zUwWNtPbP^NwSD&;zxwBE?mr+Txq16eia*E4gJ8~rG&F)O+c$alJ3eTwR*Mi}>gtW8 zo}&#Hx&LC8f}@Qx?`$_caGf%`?7Rjd#cXKMzHj6C5E`zA*S4ADZzkUszzmQ94->kn zwUaBzAOdycjd$$X8lfOKkyrZ9hjFN0PEnu2`sR}kYh5VF`vBAxZOZ#xLv6Qzk5iL( z{odcKxaA>)$~O}dNX|-RGoiBHI6GHuT}1NqVw6Y`k*Nz2(m 
zB(Guu@+UbdR;q-yG-z-kp{C=vE_>0Bp7MALMyUs`*jIQ^+J%m@W=Bw_=+V=bWTpgx zQXKo*clN&ZA4f_8@L&b3WBNBEF@NNui;@9s8zv8P!Eq&Jy5MJ=^J=;W@ zt2P?>yvIi#2SQRIskVX7PI)aU&(t#abeIFFb@#&$zVW>)50+}Pc}X;)B4t#%J$WP4 zVV;~!?h^$HJY1U$gyc*%fC@*@eS~)4z5g+O`MJXvo>8DD`$QfIwVQmjD6uZmf)Ghb zz03P~yGx#zP{#8S1v6*~}R+r!V@RRA+3tLjTXYW0~{_f>F4ly)Zfg}xd>%;2Mqb5|S zJ}wb3Fn|UIkpP4M8VDC$ro#_K!cmSKoVfeV@8_XOha3_iLed-nxzbr!Oarh;M<=E7 z{$qK}611|hWjqc_)gA>Z%8^KdJ$_ANdq3*|H!n;(#u(m)tsBuR~$fI zd!7N=s7u_LY(bFHRHZ3quz_j+hiAX`oMk5&XOVqJf zh!@q?i*)RQ6!kAhr{%~Pz{jmWjjaec;DD1#p2~)HGgDWuh@l-}V>+_9dK&~sVGr!z zC8)Z20khy0v%*R47XFv{-lLzpwWfD|@~X+d{KS%$Vqt~g!2|)AttLHhNq460C(;`P zkR8u4E`gMbBuKab!T zpXGi3GHAiQh;I2dNH}lRhUIGP1thP(?+(=(Q!Uf#Hn6lj3t0zfL4e?~|* z36g-K>7M<&bh0YZHjf-UulZW6k6@Dw*46BYiu-Q5?V-2*!?IW?AX2d)AsA@{1gIaL zNV3n=W&{CPJ%Is0z|CZYAo-dH_TBx*AC6@Sut&hVMJ@j#{a45?K(c>LgMn^XUJRbP zVlCi+0Ff}Ww2|E+dnC-w>Q)nZGZp1YPThZCS5(aT3q4jucb}V0Id2ch=ctlY5{-!I zfTHxNZ4ch|%D1n+Z@-&hsfxyXOdOxTm}y4!#X+{ccMB--HBChfMhJiinE*-v2p0vq z!%;Z)4~Er${JImK^Mnu#X66vN=$N=j$4vl4|F|@H;WTSOoK`F*>F>Pu z=~e`X2$*U(KPidr+D+(0OfRQ=AeHgc`%Tr3We8Q8ztlEhFbE(dDWG9Gqq*t>&wj%I zivolI%bf%IpQenW$@wqJkG>bv&^kXi47c}@qI9@Z*J#H+?+ zriASX1{zzYCDYpW2Oo+%_PCtUOIvhHU!-GIr=Nc+@3$^#&hKD1{~U*jGT{6*CtHjn zObCFB0tByExH{INzM`og&C=G8)I5mc107*UwC7sYoz_dUBQ*Gn| z7#Z7=C0X^}IeWeT_ar*|+0UGrtuuFYCELt;#`4@V=j?XQS!?Z8-nDht&V7i00D%IO z&Of~Sxki3X0u5gP-zeN{CkUy(u=#a=we~aDSQYw|CtaXeKXTlml5B~=Li-RXP=r{{ zbI!B|iAE=kH2L};?7aGQe^t--1rYFY;vc4_p()+Qfg(tfY`s|pc3JABtwa{bV@4Wr zk7G_8#=Jnl$zAH- zuzS1Uf&fWAs?ABHLVkCcQ9e6cLdjFM#sJ>$fB!H4xq9DcEQAQb3S7Qx z#QgykqMJta=!SlPsPN%0-~9KVhO35x7J2igGGIzer~NJNO&PPQOnH|ueh`EV5P(f{ ztB+r^F<1cMjnYOVO{I~v#?T>#rK{i+ls?Ixa9Ec7$CHUTNUAyIFZ(FrbFu3GdRu{?c z#Yi0x;XrKiZzJH)7<{(9+af@MLuN-FtSNycNEy~_-M#s>Z`b|>!3rH3VFZwyfW{aV8&y0TLAfGpqLE?`aL56rAr%36+AH8x zghH@omw)?)zy3E#?%qCg0BB_j6FsHFhG$o1G=-?Swkf&osvRmY3WcP)E3NsC6s@z&i}{??x_yJ|}`H!}s2gi(M`_Iw*a7hTkl-n+*&718{eub76;6cXTo5N5%x zZ~DOAuWwG5OyYu@LIX|dFjJY8Na~IhMAF^MA@gHZfj(*V#yRVyBuf^}qHl|xy7p_X`{ov?a+C0G;D 
zL6G1CnM~vlAj|U}2n-VzrViWNFw^BR#nxLVbt}zVq_0 z-28_3E}M^ml;kdwicOE?s7rMKNKj)MFB1}CoqcYP^w}?EX0ViaC8?y zfG!cRDoEJ>?vHP~{L6~bg;5@e)K_~qHq8u}mXSHkte<4}YZn0lnUufs_P6}xpR{6q zh*BNpViE(xUrew?>4fr*M9modzo{dL+t3OhAPfK*4L9uFPEe$zQG&C(`&}6^wVA$* z@U>VwHDG+lQF$~lhk!j-egC?b{`tDyHHo(6XS^!-5`iS`nNqafjRpxyGC;}l!QutX$pQ)6a1Mou_> z?FKE>5C}jR*a4872zi&yXJTxO)V>G^Ap{w3x^+8Y!ogX*q=|eF4WWz$lry*_dYupqv{ye1y`BLysyL)}4bQ1FU`k6sT}w|pU;qRSpkU@ux}E~_5^yvvCA^<7(j@t^;2p*KSdFw&%!-8`F-ry~xg9IkPqeKoAN(5@+sjNQ@XJ0HjF%^2rxJ_uNNSgpHEk3+Rs>(5r zle3#3`Tm8gU-}nozI=T&g%Cs#P&4`fNIq6tFI$Fsb`Z;jIGiqv$VC9U$(t8IHCaXN z``~}xa{tdC_0Wr9Ap$0W>{3niwwO1x#5A4qZEcG{K?De34$YEZ2*)-Y+Al3e~S0fvh zcR^)ZAT%x`DwM))|5}wz+ayQvN=c)B&HJe9g=^pNp5BMA2!W9j9b{ggjcB3pkNaAx zv^wtI1^T0TfIz~ff#m0WYU90BY<8v&N~`<2{`cRx{H`a>9e*c_=0SlR?A+#weS?s7$H8G6UATEAm{o0=?AmGFg>zSAsYAZqc)GP;2dx^AiER-0-T&9~M2 zqSPZG76Pl{;a0j)v+$IM$v3tco- zb%*xOcM{A-Q&{va1)${-55{v~cw#)#>s|QeSu{&0EGNsA;yFUN5YoGc18+X)^Fe6A245mT3go9#pi!C}R&ao#%wM=^qqyXZL}aMVmQ?q z_oF8+G4gy@LKCvIH6dV~ zLPe6Kj3g)mMiQV`1^|SdOqLB2xvj)Nr(CdVk!9)2kWvH`>&-v7`bBR(`qtP3V?~%j zDNO?t5)1D}+j){3i`e|i;eG{4T|CmMYm$)KNeLRQPc5r?^Pj$d?FHv7JN0;J%HOAS zTgf|Hn`TeAMU`V{HpixTg7(Yx%LPptb%HMrKZ6CI9 zv+w!!FZ@k(n#X=2HG_%xLy2Hs<&x{B?k8+0ca4cu|j8Rz`z3$&!f9WJ zye~l zNPvi-Icf?Kf}nKmVD(}EN}>>OMuPz~ux12kjdz3sK}Z5=gaCq6n!C$kc9O6B;|D*b zl61&x>iWL<&9D8d^vytb2^jDMv?aeKKTr092HNkq zxoH!bNh4+qcjVHg8JSC_-e<4;{@;A$*k67G05dE1+un8@X7!NdVOCU09_DiA6Cfc^ z%GS$j)+9Z40buBAGIr2nq*&R-N1ySi6Yuf2*L+46K!7REogme)odM8Bk_{6FvlmbT z(ZqccOp!48D=VmRG}8=#rTR^P$bngpJ;(0pUCbYkvNtZ{Y3=Mbn7IUm2nPw~e5{FS zjQ~gxq>#vAgvmv)qCtWHhzw1wT8f@I&yy%&fUT{2uek4%PMtfN?tW<8k02-r>Mwl% zFW%k%KmS}g40OwqEZmDYf33$wKqBK%e=L=U>p)TB6Y`zFQdvxF1ZU^eAn|n z_~)DV-Q*P!%s?4w6-hY(fZpCE4?0)r2L~Z`X5UA|%abFv&RJDg8xCShhJylgNfDrw zos*A8t@CM3RWKd9;je5b|AN$0UE@FU{ zvEI680$Y^1P|x4|Z=c!urhi#cGr(dfAU#bKWlwT6#s%8Y%K5*eQq?ggbp$nRw_gVY z(cO>U>DRsTZT&raAooJ}!H;lyEi+o8_J4G+n94XdY^dW+Io6gx|2~gird|+D=m!F! 
z4D~$DBS(M|B;;>;M|h(0pOVU`pB+_!;yFqm0Xp#SkhzqJ>65x^9Z$dq#sk!zR}evl zXcqbS6E1k{$>+-;s4C!ecb_oHXin8Tzq$FQ-}$Tcd$kZqg2PUi7M0piayzHl!ptZE zl$5MMZBd!Q(jHtrOdV;&MB4Jk-2p%??+B2hkLFEP?JK_ay&t^gA5VDp6H!$Rmfp{T z^Ev;ithP*O6)7c^EPa{&j((BJ`zpt_ErB0pX=hZ1-XuABk0gU5DBNSiNxyRT1K+&) zW%?sYRdi&nK z0Y(wx(kN?XEkl!5+%2n1lN*{vNSLVwVb-=0~n_caOO>x3DFn?-?`(P&-OMCAz(1`GferE~K3-37oL zAa?=Xxzd-Vw#DC(BI5IT-BAtw-50jp&d?gUSM9#>*&l!F zmaqp-!qTrM&y}A1s)6^KgyhWwOaRour$%K30W~6S+f=CH*BrEDw8{GwZQx7_6vUEn z9hFZ|wuDpYyz4E8;OECZMASe8W}>eO%inyzUpRN&@xcNbvMyzPM%@DukqXg$|Ng80 z@NbrX@>??YKxD-1OLsxo5dn3eoSTeJ{Ld%nmF=yWNad1w;y5Vjz!a@`JKGB*uyU-q z^2RN1{D2nb`%s8*stpX=-AHnzZ1J~84VoF@jzUW?qmsUP`tnUryZdExH7kk~;G{@N zDhS#R8txLHk8*oQy1SF?O4^t{RExKP)j!A8QH$L*j^S^Zc zRsXW>8?;e=J^&)efH2E7_jY1UxdnGG_;n05H@K z5gZ@`_^W3==wZj5YZeA1SXJH(mhYAaR1vp)>Z{vc|DM$m6*S*73Mg!U7M;+Pz86}{ z`yB{7OJjy0(3s9yG_@TvdB_3;@Zj^R#M)r46$`%Qf4}prcb$$6N4d!gJ$6VGSPBQ} z5-8&ZJHwJ>v`OmH;O*3aCTS8w5XnXMMa{+Vtb0BDYajal_p~edYzel#nH^we#zP-a zV*N!ALD3gj@}DPjlh~vp2#LxZse2ajK}$lGdnDO52on&YIzI8v=l{%E_pR7#LXnDC zKxO$X&>VGuxm><)^EFpK<+U5H-wkFlFf2XpZJWMZ@uITf4#sy-32zgZNtEY#-aR+Y8f|%z5bF4Uc-SJzlMXnRf$lfmviZetKIZz});OL8fLw$@X+l3Z$)U1+sbc_07W%Rd-8tcej80U5 z{^4`m{{4%fD>XH(c{QcOMarnVwjm%06drWk8IL*fe2Z|&Gvn-7yZ>_C;O_OjCXe+h zbCU9|qP(8@ugqxiv5ToS!66ykP`OT7YwLNg5i-^6tz)11V5Q=uU;uw@57S*EhcIUHfmi zi7p5zx~$9)C*W-^}zMi9}Y&q-i zTSM55;Gd(UK1dKKw(X_`~??2dJaLAMUNR6oDz5F!o(+k z0=qX=XtPV>ldDH#S~bdwpb<3JGVvwnJ?1VOP9hBSp`&W$0hDpa1ORe*oWJq?pSb1a zZ(rpk*?pl$qiK3(jPk)aT!GnflA@*I!Btq(w(~uJ&IZ@ea=eaicK_sK5dfM1808`W zL@0m*K7jTv%Hac{J7y*cggF#!-qd^X&mRAzhr}xMAPrL}(&c3#I;{*dux`^iPS|D{ zq1ILJmEQ98kH6*HA3-2Vg3*vGvyOI67PUZoZRjv0P2j`#R{QO6BhMNShXh5cjK`PG zS-eQganisUcLSbz`U8IDE|(ots#$l#nNtytSnvGo*KdCHyH@wPBnl}leabe$@_=)h zxkQoCRAUt5pm^wh^OBj!nj!^^J`=I?jqlyP`PwQff^98eS}|rylh6`s-{hQCPd@vS zb5|WlSa8;a#@V$OvR=xn-eJy{W{W1+NlYfrrDS}e*ARYe{at?UtOs@(iFHb6034EJ zj(yi}x%wr4zG`obwuqb*PoGhXrR>xERCpk|4xkPFmKHAkHdYdf2?W^GYH%JHvqK_8 zxCBy=8nE1KW+2&qcIj{dLU66!x$nAHzSVc{mdApXa4~5$dGCjdre>=nKF99dGv7Ur 
z>u@qL2y4R3i!b@f)i%2jDAJr=(o07rY8CJy@?S^IrHuww;87#_k~C_}qk8P9+LzJT zl&HW21W1qwm~_|W$Gz~v$IaRdThwmf(3#9JzjODufAdWnuipvEmP+qn`be7aH7G+X zLrHj!Z^Fu?t~bG{$cWavU}>i56@wtt*?|7R`Z{*&7lRo>blkE5~l zX0{EJLwKU z(%vtB`=(dF^QgWF6?2d7(v!e0SfXVaUbvUzwb*u66XHl+fT?IDfW+(DIBtTOVIaN} z5>!Dw_xN|0@A$}n*+T7EM^n@Pl#Y0jfO3j&01SN4vFAME*t?OmV1NL1VZj30A8=&V zt@M9_-341&TOXCMmQ(qCTwHWeInOx>n3QB%DL(kPyFT^o2hYF=5-^7h1?uJla7KE4 z{S9A#%BzmqyuA)AAgC!DqU2Ok;M2P22V67&&Oy_Bgn|G8MhXQSp>iI8rw2xvtZSk1*eRJ~|sIzB|ef^L9!f~@}1fzsu#5`JE ztxVuH2@oT8-uj&vzUi2&cP#W;$TsDS1|t9s%D|45FDEzQrGE3JqXqs-QRZ^yNN_K-exNzPo!zCWnS4c~EqwXm!tPqmJW>-@f>X$5yK;33Z{< zF?|41y|C}QzxR&WFJIj^R4_N0b7Wo9?7PDW&`1pc@M0+W&;|=LAv>-z0f0rxQDUBH zBxZB~8lKcAG{;E@L%X92piqkG&x{Ohl<+9?(SD18SV+Q@bl-~?{0I)Aa>fh9)8r%GNNafDR;*r z>)y_&>AM5MuipJ3_t|)61%Uue3}f5(a;cKaT}r;=KR&;^~< z*2Ogt1XMDCc_!SP%E9JR{60Axro3gZFZck+%xBWlg0p5V&lAbd8DSGtp%!Q}YaKkB zj7D<+aHHG_8rX#YiW%N|2o=nBTzb|yfARv&&dS`Be%AoD$u!z$O1CM*A;}}`Yum4T z&d2_2XQ*rInOIw_WF3m%di&XKmBeDZk<3fiF1|q|G6GH#5PZN43jr-(qYW*|;)pH& zV6B(Yv+-LQUow?ThGaC+4MC8kO8&TG@A<+D9y4dXNun+d8O<9gMaF&KzV7=kdFxTP zMj2COgE8>xj37mbB^!65athz#ppR4ABtd{wQw?V5C-10?>DsY`TVaBUl3A=BQJ~yy zVhkaG1TgpcE4RG+!-AWPB*`g4=ag;-pc<4>cUyDkpFaCRbfb&zRoyay7J4ai5BqN_ z=hl`dRjv`u7L8T(tL^7VO&(lFBxwdftN=ND#&yBU6Q6tj!z-)U&DNj#DhYs|Dz+}ag9s)VBEb!!rju-G*ER)LrF%EX1{os2VS`TG>fX?P98Lz zq+o~#a+8R+0rV3MGyMxt!hkQNTos~STO(|@Gicc+jWmE}p&OVDQq+GCTv5;o)w-8l z^2F6)Sp`Cl0u@Hv765{4|NK?I`R1d(d4oeS5P}=Ku)KVI#RT~_cKC3bc9%}OPg{*` zT^#<&rNV^ydt+8S9{%X(`h&<6DkZlyd*aov`%CQI13=Q05l^W*9jS+bjWRil@WKlo zwIR&)Tm%c;zoT7V(xRdS6DGAvs7D&ZRo;C^u7)~rMMwt*>F5XQ=s5ebi=S}(+)-`; zLT#EvEu+W26G6ct^!Hu+NAKyq`NJ_F5KcEUSmVrZb>!`wjwbh2$uiyef#jl@07)VR zkxZ3>keTrkZcnY4EvN}-u5zSB*YFSp5&7ei;_(e|Kmd*Ip7{xYL!$4aq;UBPn36@=@{GybRLlqVzS^RU>tfA+N$jDNVHpPP?=Q|>X5tWUB zmK`t3-8N>CrSC+8FKJOdDhMyX$KxJx`~_AOSjyn>!U^UC0jE3S5C7@Y@h?6cpupCS zX2j;GgGq$xOTIrM2qE$1E-3)y;;oQrE)GZn^JWjd4%IUBtCOlCNTOQCqLfZ7L5v=I z?)|rXX!qy89_rfR7JZ8PFs0i|U4fqXsFTk7@ng;jk%S;XY*m0cc%FVMpo63VOtch^ 
zjipC0%~F~r2oz@eG9ZXgKKZ^6KIuGGI#e4$GZ@7hanBd8-0{l4Ufo9>=*(&Q+52AT zx>G&pC^)3+Zo20Af4Y3v{DO%PsMJAjit4aNgXI5B?bn9@Bm)c}Ac-2U;+3TGpF>wi zkR%aoXzA_dSzqM{UjcxD30wmy!d8F;7hN^oz3xTd__Z6Ze#3jJ{e4%=o1OY>r!)!t z=aPdA5|rJkTJvicKGFzEP&@k9D*>*%&TjaP zi=VW@Dlq~9-KiYi)Q{12?)vI8U$_49Yf({<41zmpAOJR5d4Yt$Q~--$17_e#D!d3f z2neE}API6pKmdglktw7+tPHnjq#8>Csdpt@K!frq-3*fli6o?H8#r{wAUO^SG;Smy zZzOYGb$}27GBled5=xpo;Q1nD7B*AH?j1}aP?k+69~vbW0l8cBTW7+?KYq^Y2j8oz zW)}=|&KPJDtBtwMbZP{jQgiDY06kw_5#M1sFV^Ob@i&WetvoG55h7Zg!^l9Ehq}<%8zRWE| znA7^GSy^AMz1UJd7DShK?u9e&&v8hQJWV5^b+jeeW_rQCy3QV%G1%J3in%z=o9ZM?UsxMK$ zg?4hu$e0kNxm2)fBg#7 zQBFA&BPFlHt17+m{J^Q^-czO!t{UI210$J?d_6?D+tu>dAfFChtV?zaX>`1#J5G_#b}UAbsO(?b`Rh*lh0`y2%jQqk zAgzh^#=!j*dFa6^2C&=<%1uX{%C=!DTaX+P-%GVvTmdkb0gt)UJ%8$qOJ`)V?2@dP z32SgM#t01XH%I^-4f!aexMxy@J}Cb|L2!|556) zWDWZ!at;?HlI~8@vuK(2<0z?hKib-B8}o+)HC{}R%c6FkcI=vGK3bJI4bd4LF4gmN z*FsbsRYyvi?|Z+7OM=|%SX;kOfI?l@sNFyi@xKpBGFtSAenj-(P#x>qjg)(=W9E5pe&danUr94$`5p z)Av93BY*z2eOnd+gQF(QL2^hmAoD*t6m(OHC{PQ}r-k8@$VMm}-fnFavTv(}IC@P; znnct>6@%+OYfgI2CBLwGX0C#PP>YKeEdDLoHe4!HW|lcmB#0!{ObG%Ia?YQ*R~5kK z9Kr;dS?Y|SAaZ0K1|wJnEt^pijf0PfkjSOfRC&uI4fhe1uNqc}vVa-wlH^A}>_T7G z3vMK7geVGR5~Jx_%N!Vx0U%(6;E>o{(x5-do2`dKloNm;cM6ao&`jY#PPrsHESMWj z5QIh1)Z#x)G@C?xj#3ed$T0?Bjb>hWpT|A>6K~z_`{7bxo3G_zJgpulll@#0FG&8S z<1SW`-N z&g14+y>Gj3nug`vrXpnS zc)JQoixBD#IS?YE`MWoN?hmf`CqXq0Etg(0;gTdvJxVcGdr6jYFh$QS@A)M*!vEyy zoIGnm4K$lYzv{K8E; z!rJ9zE(RSEK>|*tn!p0mj!G~Ckx(>Xb6uw^`2k6;oBeEb`@#vF`0Jr zUO>xAh8?`$vCF$F|Fg7)<$~{O@McKDyNE`UqU0o&_3eLM@Q8EP9Zyh{nsO}Q!P&i} zv4k|;{mAStg=h#-*Z$qd__DXdy>cg6Tf%Arpb@cL9Hri-T(2?Mhf$J&3HQ{pt7Mix z;z58wS6{v$u7B$b);{Q-Mk6^a;EaHilw#62CfsFrf7~Z54^V#4NiK=ry7WxHyB*^& zxXnr4VKOIL3e)hAyu!5(|K9#>PygVXx7Z#R48e^Q0naWUu1$L$9xBU^r)#NE(It`! 
z^|D%1EBF8iwbno1RY-B@^aEHEJ*UZk=EVCy_q@w`p+|M_@pgzGE9C(cu$-25L)y6c zmv35sYpmx)5i*z{7EJIZv^8Xf5+~xA#L&p!&g@>B|CP<{OhFapKFs# zcc*HG;0q?ZZ2h^DQ3ER~CG*%J$S zdg*&v+LtAwq^T=)N@cojTJQu(*su9lf9~)9Wy_c9o>`3;NN_|Wj)uhiWwD{sml_2i zsu5MmY^WfYVqph>b{vlK3>pnCj14n$6wOTmkP09_-IsUN<<$~p z{Uy?f38bo#%QvjU+zfZs5Y5q8#_1z0TxHS1;us4403jpGn6e{}fJ!GTuqW)jb_=BL zUif@2i^drpEAeL}Od$Nrvu-~5Re zt-p4wDguOoqzGj(zd08d0WlwpH5}Td+zl`o7%Ld*RC6sLOYO)!^+gGwBspoa<}SD+ zW#UH+$B{-w@_{9j8Au9Cqop@VUI1&?28Vb%%8M9;v+I?yEZa9XwKIC6eR9 zqW$|#-~NsNc;}uOPy6Q-pq+@+!0ZGGAlZ&uOnQ1oPEv4z1mw!J{&pZu9I>_>em)_o zT_=F>#JCgy2ZSK5j^X7O|J41Dxl1rJu(0Vbu;NABHS2!E%6y+K8v3A{84?i!ScU4& zM>7NfAj&|M1(__h#@Q;!E;dbOlpQS5 zlGcD-Y6YAb9llaZK^E>^>-(XDxh2_Q z)?GR0;QOnJB8W=-m2)5VV@IFevtW~n=W*w5qp(#wsFN*#=;DNPsg-Bex}kUC#waD` z+P0FDEHHN+7>T<1Eg%6R5%TSbTmBfKHb^PK8;I%iYj{ zrp4v|ho(7nU7oJOuLGPx(qg)>>X~Oh_>AS70JtI`Qj+IIK_*`oNi9p!_>!;4Lepqd zcj1KlzONC=igq5+5(`DaY44t=(u@mL6hnIAI~op0EiC6Rk+nkkPUT zg*uFHQi3Ei8(7>flC1F!Cdzcyt)4oHr_{cW4%FU%^H)FqwyQr9s{lb1 z5Hb$#9k{l&J;3?3g{UDz9RZkkzR z#(dc0f$Ndw*P+0HD*)Y5IvJ$E z@QZi7-`BQ&=kvR-m%JvcRzyvq!AT?QICz?wwa#baLWJ7!=xwEcq4J(P-H)=Buek3M zH_fgxvA`|MCaek4ImgTdF2_r>qi)JsYPmJZckj94!N0oc)_&g=$m}Pg0{~w79_0oS zM#1T6JSL>!Iczu*tZAsoHD!Gf*#t?6X8Y=e_5b%3tIj_?fC{9_I{=h#%+&ZZrKucB ztli<1zO!fR(?9g4o3sa@%-9g%9ZWPf;*HDScjoOB5J1}&2#0_K>*7dV$kO>a6a1mlMh-6Ce;o15Q}B z;aTVXcqMRjfqqGcJW5rNAQ0e?!7EWyXWXU6Q&AvM3VnOZX+QSB6YdHIiBKBG;f}s# zjQ#9XTSV(+P^2zy0IwIWe%YTb`{&EiXB@yrYsiE|&M`~S<#<&HNJKyQB?JYMCO8F9 zlCff}C7Iwc?z-&mXZ*>pX?7-nU}y&kOszmun$oyHc!{yZ==(VTTc7^hkNxl)UR8nu zAtQyPt9cF;?;255&JOCKB-LRGM4~p>-2Kr;q2((8w8Jol9s{p?(2WARmzhqjs? 
z$!Y@QPk{xO0vX%GVpYwZdS@3ZQCYMP)!>7298|wDc$qrMiPl__MJjU!NDi6VzOP;5 z3$@M6$Y5kTXR7s2X-b8}S&aSKW+=kAlFv>#Ie0Nx-*)xvc>8gy1 zj?+csP8D6sPSB2)kDael;qGu5Xo8?bg*GS2b1ui^knHNvmO|7j_Fi$HpE`BfT7#(2(Yl`I!i)}C zZgY&WoeMW%Tu@o79nyu+Uzq>qk3VPqjoVclkZMw9nm$J`;>;{F5D_$VJ7OSO)E)gs zsS6naNs1m3cir!9cX{)RY}P{PjoLO&gW0FEo>CXbK-*B96r&sJ-Ou@#H(nWbPy$I1 zU`u&23@6W8vk}H3A91D9$&{t;N_9H~P>@qbXtw4{@BNdHJmH>ZHdY}?Ne6Wj9AYe> z?)!@*mnM?5WT?X287G+AKx=9!&7=%IF&-Q_4}56sz6kB!D{{WJnxdntnLS+4J|&%Vbrnrj@VosF;@gMrO~4~o^;AZ4?E!=Bqs!t zI)vCwgAVI1Xcn=8~p8Fn5!f+K zk^nN{-;y+4+3TWR>TQYG_RNs9s5P=%F290ts=rTZN=ryVG*rKI?gP)A-B9*N4$DA7 zLqv-TsG=oJzZ_I*lyv6Irr)^a@vGEx#KXPyFSnoj0LCqqUJh<)_gxAG1s4$gyElCM zVZXk1zsxMj!*rT*Tp%L_00DGApp^sgprKrz8lK)wje*auRBpy)W{g3I0Hg@_Ug+I? z@`khj@%L?RhQ_97djtq@s@_ih7Sp9DQ+EhC0f_$foi{w|lW*GA^PVG8h|ICPm{><% zURFh$M2n=@@w+kTa~T!^U|Zp zCJJD0!&EvzP%6k0_u@nRWSIB%f4!jt*t12?n?z0469vwE16s_^Ho&atJ|EmQ`qZ z-$ya;S#H&dG&a3BM__%L;^v`bY?{xH(21#U; zt9s?BwON4O6mZaywn)3?lrI&Y`Bh$wfk7= zSl@E_H@5xKt5>UXIE*s3nk{8ah@-~o1R+2M`oL(Le@BJlG%WD19F8e8fT5%jG$5QD*1h8XKeN%6gNpRA zKzj;HF*zq}`&w*5$bzPRc9Latet z2@JXmNC{l~=Nb@Sk&W~H~*FZosuwTFE(MMOS zq6ol*+diVdcpcrDuI-VfR)o}5C0pQ=QGjW7kw`$EOpyy$e&vs5fxbHf(=xgHP$fD5PDm1Au=JA&sf3$z1j3Jzg1ZMWLO`PYJJStwc-#9= zGEXycss9U?o${ieR~14xJi=)h$?eE0osLiIFn8}o63LDzTivVOim7-oc6t5!&%W>d zTmFln%oGfyr56=8*imxvx?pVQ3FX@Yf({MMRqg@N7{D4nXa3|n-S@e7e|SX#EjYOX z&d=ehAKvb+ns8fjXv@}w5Z0e_`i;xNviPYA154&1+?!KUM~@+rQ;6tCj;PLevRnB^8Iq_=yy_nvXDhko&2ul`=# znb7133L~n9K$6(~{mM@elH%_)V+4IU?BbPYKI5)G9xMP(bZP3yi+o$Ob+Q1**1oEH zSQyu?u)Ck^5Dm(0f&>T=x%~|sS!Jw%IX-S(2$E^a<&-f=HL!Baj%}BJ)q#SvmvlQ~ zJ(BKVEGAJIhHUC?oxGQVcTE{}50#eGc9!!_(62 z_zlc+JlWWp9U|nYvT8Z@^w7QqRY@XEZWkJVh{mwuo@Xq4 z?n+7IU_o{OYT9qU2Pie-dE!PoN)^h{l+y(|134rUkjLe)TmJq(j(*UEn61*nE@}3B zcd>Ch>Wk!I>o+l}rJh~+-9A!xZNf+K5auzF?0%;xFlF1A&p0pHbmmW$Q;vq7+0pgf%P;-uqkGFEy#kf*_{*~!ADV1&dAlR!sJnN+ zGB?M!MOj&B7bHY}xktbGb6?r^%dcM1ucE=sD3~BaB9X~LmyggKU}OI=rtX}8V2wCW zBq3!K%aS*yP`*y-U_NnEetQC_J+u=)@&|LrA1$*0sJNaJyx0@Gz!T%NEM2FCM9rlZ 
zHcQ0u3%1~;v&&DoZc8T&(oaP$>3{)nM985UHfK&TCjEQx4hoUly|DioAN&8lshd5z zFhjWHf=HzcB@Lhh2tr#t0Lq=il95Qgno*$n3RJJY*ApLb?A=0OS@aG?QS10lb;n#N zS7gb*NV0JiqVkqx@{Okh8_qesziJr>k^})byT!L~MClm800Pt)hN(76#3;yX8J&<( zMyCKRi+=M5FGp0U6$x~h`m|s``I%X2)-0(Ui#?6}nJDP93@qO!86ZGZi2HBZal^j@ z?iNS2BMDLvSI^A7(@ zGdR!WceDK79r>!i;}2L;l*b*{y>tKf{%ym>uj|d61ZEcajHpyc9MR*uuKd*hx#}aa z2X_FToBj`c^}$hzO*@H*f$-2kN|FvBDA4$jP3Qd1#ZOvcv!oN&U2iL(Bj-mZzo+Hi zZ4$L*Nn+CJ!K!5!oCSz3ckiw%IUzu71s4GZ5CW8C{T(ueT15I7;8dT!=DXW&{`3`! zF-U^a_@`0-x2d@((d|D$Ptt9C43auJS_B6OC~EuscR#p%{`wtPzf-=u(1MyE5i>sN ztc&lr=2VyE9KO^kQKr1`6x|tR#tWAp^WqC0zXE}bgiZLImkTD3!~HwCJhL4@H!*Hz z-33(p&q6tu+@lZ$y6@e5-8Db)qN8GPBZvTka7X|MwH^8b5kSw8K@WHYmr!+5Bq=@D>OCMw+D!_wnY&#vGBnqUV| z*jyd~Mm%}%W#68pvYy+?gC&TyoV&NYd*9dpxKh8iYUKs5npt<>5G#ZL7{PRH-mECP zUAuSN(?0s98>+oMNAws?^lD(okoZX=D!=p-D%iC3QQZ^TazG3e8P4g!o&8u&LL{J1I0wN%Q zIgie#nhh-L%UOEE+eao!YcM8vME~>O+;i1;SDbsA0ql0Gj`)jRc3YK?FG0UcJGDi) znn_OH{^9qgERid?JBt0-iNSP8@SS_V|F0GI0A_LT_HVr9n2XNxa10fU5eP$rM=8i~ z(%icLb@zwA@+*J0w+A^;2S6#abG#=x<&C)jRl+Q)m)_?IC(f=XS*+5|Ot2UPL5o}A z^6(ZNL_~LAf~Rdljz-PTU-R0(oO#E`0MHCZgOpW(K~6%3^qJKR063(J4m$uNBmpEx zPRAYTAs%*)Bol9a^aW?V=Gors^*Tr(l2dE@lztR)i6xaEcmay_!Y!ZJ@t?m@VV zzDwcEr>!~lmteEhEXkpgRiptD`s%;tbMJlM*00(^oM$j7A34ng(Wsnpxv~bM1ULW$ z!QqgCU{No+*JB=i%Dv~*LvV_uQ4j~CnB)Q--j~giPPtH}iFYkdF#guFoPdptY{FQD z@^Oq$5(Ib^RzBdo+M}5fP=@kNjXzprve9=WwU)9_HMCtQ-^>6I-v8Gx&i~-%5M!6o zPCuB{$R1N`-kVW-(?cIFYv)69j=Y|*%#QZ7|LaSg4iR2+L1TuHxm-QDBX|x0Y z%0(lxL6RekYhpU$S!aqKBKAN1p{G3m=d228c+|j>w!R;sRIKjt%S&LYo;=6!$lKy4 zdA*l9C2%QjT>0G}AKu4Poh+%2d%kkZr=CmSVXzdyV^(X^qt@NyH)yNK-a)6?kNuC| z@a>m;`dzzc=6iKeFegkV5&(k@29CQ1LcI+T95I;sCf)m}lU{q@Us_$wRxlZj9LoKr z+F=&={qSpVSb5whwfvGO45$cSD^ZMN+0HmZX(pUDR#sbiaxAQ>QIST^IAooAr} zVS*Fsqr~Mo?!Y=gvnG=gq48HE|85axw(q^4s<-WMC_*UYB6RGfMN1UAUaIu4#MGYM zq3W6$9iH(Q`ej3hw$>|0(;=T@`S{Qw6*B`2NLqZ{eceA)+Fe2U1K_awrjPI6{uK;c z%wd?9xb&FQ9&z&dy}mgmJ9xd2!RZ(Q(CSJ&e%Z#~yy%Im!b}CTr27wTO_lll&hP#$ zw|;f`+Or&liAMzJrJPM2V;le`<(L?jpI8&*v7M7 z3J;3@BPT*r(mIWy0kR8)1q=#0|XcQ|I1Xg7v1O4XKp%S 
z(#;KS5W(T_mwxc#1DCzEvM{~vK8v2{f#XS6k`D9wX`d6Aq@?9V0vD<(_dfRO?Js-# z#6N$@j6THm6FyjQ6E#?Wt-+m%mfh({zZ&v-Y+y<+L(VcV#f-pcHAp%Tf9vN?cuD^Kt)oChtE*{(^COeZ9O6U`1&)Sd9NR@ zX!`!`gSUUQSh+{vx#!~7RR_M+QDqy?RkiW3f7?D zsGJ+`eUDo=uG(m|w|TFH!&enzF|QlF-lqH%TLUTmXX11O(dJ-Lp6S?Z2P& z!l$qsB;AR+vjU>e7#zR~po;#`WbY}AsLZ>R8UJP&u&W|^zW<8eji2dq9txln%mJAx zHw$2_)bm@uuyD*zPHnnxR4159l?w(=S@*@~J@z+0^RAtpd4ov;4Fo*X_(RDk80nSU znNwR{a^J^QRfhnhqREiItlXelOS6Zr{73BgZ!;{=R&6^sc$a~N@VAxT2ziUmS7zhK z?n)XJ@uRJCg0Y&In10x~C?0oa#;lYZ6_7r8AwYou#{iH4kXKI-ge19l$OHfMh2Hme zy5E___lKmE{cwJ0^!{aj=qz(2?QaZSc3#cPyqS|_g`_o*ZYWi zXq^M8p8oo!JNPfL4l?50SmyN&smp%CIvHwm&z4g`+0YTyuHaZO#s2K2`@a0gmERy26>!2&uRHTkXp=-$i2N_>vCDRX z_G@Ms=8fTSmjWM@{%q;JE~Arc&N;2#wyqNaLQ)WrpKk;J<$jz@10LBsJI4f{8I|N4_+B^$ivyT`_{A2a*e|G~xxYUGmQdXOlX2b~tmPi1S zC=Nz~uzb9h-JjJ7rKhM&|Co3dfPk9^hS`UmdD0u7-|2MC0zeKqtL}7Px#J#e;pWT#;AXgk7co!$O0D-VE;rY`1mu0WAlrV6O*@|*=lE%?$MM?rf zSpVoB*^gST;BY9f!4nj?bQHFFd31yzK>$3k;}o5rzi`i&zYDn}=+2mcJ98BjlJVW% zrJwN)1A+j^<-X^@<^R~>yZ~h+ERi-PB|Qa-MDVIMcP{9rFU;-!g6IyAaj2b7a>_u+ zess$jPdM(Jeyj;mfQteF)-K@OsbhZQoJV$P6da(e0RLOd=dS&b-}Z%Qy_!xjol7+CU_748#wbVe! 
zXTkPR()o0 z^3R`>BBBG3bZ6}79o>LY&T~uhj_&2PLT3^k(Rz01>iO*-F#-fK`yg_wUP04MaQ~GD zZ~PdsP{Z0_nnhwwXXz(c#e9pz4!p(8B;th_$G;Bs@++5cu&%Qu%bX;J(3Ui5n2_^%X2Ku*BnOdCOf z(IoqBQ!?!rNzf9p5@{qL^*G7NNe+Wh?MmS^l!QC>wnVS|4piA-Vy2=b?sbx&dVo`{W(KrFfa%jRSPmcQg8GA zfyx^jDAP$ARncE?I2~7i>M}E}n(F-Ad2j_UOjfJD4aDFSDbfLN%R2H{SGV zlNkg^G{TUD>^LL|l7@$MdSF%W-GBA_fVl$Ak(A*#avI%VIrriBnAut@QhLlO_j&AH z@7HCAFlP}rfF9}tdoO-#*n6S8c66fkYKy@`oY^wD`M!V=0)w+vE|8;>Ca-J*XdeNP z?k~gIl^ZHAK94$h@#RUGuLEsXNTY8oNDQO^C zHvkL=JGSNR&)@KXv(3zD1|pLr?hFpNqZ}}0R<4}?w%l}n{~bu;Nu3{Fl>||63EFwf zSN~vU&v*LPmm5hDl1val(jhXNLxq9fdbA$A>SKD{d4ExDcwnWDtOly#yU8U1sO$Q_ z_g?>ppL_4UHXrxe2mkUK>kxqq4dtx>TYS(XG`s!1b632_)B^;71Qi#|rjC2;sxx0u zVWwsHbW0SqaiRf+r5$v4E40y`Cn8zqugBkJG_E@1#Lj)rs7KL-0nngQ#t=8a00Rvq zIpoe1FwmA;PxnN?rFHYT^|kNsA3PYTK*&m`X1Urs+4&pT9lqen!JE`J~WS0-t= z=er0A1bh3h{-^HV@63mqF&IgJIVvYJH~wOjKI5bV9h=>I`Mc=5YeMXhUa-w1r;G=# zJL!e@dDN>O_@p)J5|+?@lov?o0}-Hvgl`)R{eYIKYh zRibHQ8V?SCPGFLwQm&CSi5>;TTR!%MeSi3_X_*iK0D^H~2V_jun(|gGZy{YD=G9=Z zbW!;ih7^ZPMWlWbxe;Mt^dMyp7#Rut`Xf$w;gc+Mh(Mx@5`L!~aO6>aTC)6QH?x8S z(B9HR@{VHXDCfC#!Gx%Of6tfpef`B%JOm0LK$1}cV59&+G=pNQN1odNl%ZPoYIOo@ zXeONbo2!m{8Usv~FsyjyN^a5n7R&@=1t_`fr9KNZLK5M<*~@PG+Ka2+_vjIl(If*X zqc+@s<9%Oe>=0D|gFt-0!5gOy=U#RbD%7*G`2fnQp%vu*sA#t3q4%9#Kb_iIHY$sT zluSYu0WLS}nGFqTe!oBwkb-OWA3l5F!mHtqJ4yQO)ziPZq9m;VIQtL%>7I05vI;TtCwZVKNw;87x3k)2pitU2*f#wo&DhQ~^3?@`vIB;&6uy1OKjhZ_Z` z%*C|lYSRfEU0|=?{`wE_J^$7fYf45N0{Jiuwba3UVGWGz6^Bk70k{G1MgGGwyK(xH zqzEBq?IO<~K+@eq2&l3D-p8N*?my~Guc48I28C0Qywm6Bh%vuQroFsN3qUK&PD{|N zaHYBU^0Shf3v%=fR+0pmJPNb>t~%eZd=C{FOp+5O696uyzeEn8%N8!(9US|p4QIWO zm^QP*dn^|z0whf$*pFHWvA^TuKet;xO$Z(dMY2*aay3}j6Q^%|@Y_N)BXm%JP8b7l zvrT@50(`wBGs|nU!@{sPlbx3Rf<~PMNfbqRCv1MwkIx2k(HwMP#9{ye142QD0GNpa z14l4^Xf(+fqZ_Bcb;I}G`-xh$Q=Zx`Mi1VJ8#-=TdHJ<0caCZ^NaZLB5aanlCE6`F z_4j|hf6WJFz7oODM6YwxCl_{q6Y%!DHpyWF1spD*zkmDR^Ttn65sD-LRARKQv1icq z@jt0B19S+joN=pQ(JUX{B&`wLU@h&mWdXT8)OEqEQQj&wXb6?t^l@9d4?ow<0J7NO zyoRQJrhxBHkRSzu5a5Cfu2kJH(X1tDPLG?Jh>Dy)|37EHeFbWFmkSi~Q8aA{O^~cR 
z-Et0lTWLKn+Z-pUkOF=fd(_gILek-#vGzVB1NEP6*gnZ!ee2)6_q*qJ>H~D;HMcxh zT0qTXfwPA$e{YO?-M#Q&*;kb`l7I_!pLKon7Y}WJFF6nODVJOXAfSR`UEO8%agP83 z1i{-Ie)FcwCylJ(O9vw<*7e)^M8JO6&NW)qZFdO63i!< za7fAtq?)r#5&2X|B+G)HBsE$|wMF{+SzY%h7u@xIFP+}FRtD(Kgi}Y~>#-2PI6f!d z-d;zt^nK-}fqd*^)+E8z-+$Ey!htJ5liWyU4H3}TZc#=_j(y*myZ)ooCqA8|nGIt= z3to~E00}O+4qdkUlDACO2b~}cCeQ%7i4YVfk9m-~n*#tuq144OR6ygZ6D1vfFQBH9 zx_wrF=Jk@dTvD6NFI@5TSFZW?brAwJM7hSSD&?xAVJAcyNV>lf@k^$AR1Lv9a-<5g zha)XBFGWda1|W(C8VTvpWA1a>tDbF>Qzn5d%+fL#w;xK9J?7}jLG0~gGIH(Qm@_Y_ z4Ut+oPp9@Isk|l|d)Xa=`t!Gb=HRzp6|g|>gMsw)xF`Id0=Y0rf~=70k1#_&qT3H^ z*FW%$-I=??KviJr4U!zdw0U2OUf;aq>o4o<`%0xMGP)*GC&d7RV`lx2ytTXPtg@oN zxy+hBjp`y?-U};*21>F$Nh<%O#nSR-Gefu9@XL?r0W302%b3b$w`C?XClmE=@58IC z5FkJSQr=KrP4{Ep*zwW-Lcf>p8D;~x6ANg3zxeV~O{uOd$dfz*Q3!WnwVP1r0^O4sZvhh!^lMBNj}82NIe0hk#rJCTKff|AFbd9lz@A z(<@j(HGD_+CLABjsEqW|zGjjwozu$eR??W>JMH5_$DicppxT2_E|2pQ`iOITFMDg~ zT}Qc*4w94rfJTj$Wr2lu2|8c~s@K2mD+{-NZsM3n02m##xL;{u|8?*8n?F5;DuR*G zG?5R{)LY=HbtgUA(G5d1AH&wFq?Xy#C5@5zx)fDVt*F+xEN+*YD=&JIlam{M`KS7? z`g;yj`$#0@0sv+e3hk5LkaQV9K>!FZJXoGIxe|bcpaHK?aA@`3xf@?{{$1bu$JOQy zl2$lF;n6Q?0*{ueT2dh~QNaRvhcIzBJ3nf`QMxkbYof>f+yBmQ{YJ$Y5hxI#T*Es? 
za`-4vC?V3#9h9ny{XJK{d-KL~+1(H@IU2&6+o2s_oW1P*UH1EQx>=k6U1EARIIB(P zRBO*7vI!iW7&g~oer1KYiaaA20Tvd@h@Sqb-)*VwsC2>MzW2bTk9o=Z9lH^|Hkcz{ z7lvVvD;q_zZ_Rm-4b%ycA?sL7!_WHw`jX1cOevx`H5ZKFzDL~sjMw~*O-&*gfS?hm zfBAi_1bJ$rjUN@=^!VOBV_!Tn_%@vqS-l&a}Cp~@D>CYl340Ma8pkd(}5J91aTYvVd-T(9Z zT)3igIS4>70Wbx1oHbR~?5;WYwbNUFT>zzu}*P(F6lr`NQLjDNW9ZWv7wUEWy#VTM1(j2(?dt^5W}H z`PUPl^_Va*VKNycjo|?@GFLJxEhBk*MxRezEvtG~2cmRNq{ZLpXh{(87>}vIJ1x1Laew zJjjtW0OblLI0cV!_rZ%F^Lv|bn}fV>3WB84$sv(BiEp%C&d~3~;9}WARC7g2qG_+TXFwuY2Z(pF9tpZUvSJbRi??*c|1`tCd#XbE&LgY%iXc55h4d=CQDV z+SgWgC}^LhLsF~LQBm?8*jd{VqV)}(!ImWS8H00f*F}G=8~-(EUg%4N1As_?!9yK-A@=GpvG(4t z2wNUiSzwWbme)ob5uvak$C2G_*$hQ99O*FfHRV7UhfE-Yt(jT-v_~$G1WlmJ1QSe1 zlUrERWs+KmeJf9B`f#zBS({v-CPYJw-iOxrbo0yKd+>_uEyh9sNIAe{3<>dehq#$W zw4y0emjxK0F~?QDbNpR1lCa(){H?X&msFhJf7!cq;G3?<0HSsVk_CFwet<#?b{iE{ z$@+fVR}Sv{oLI};)ETB~r&rOi|N4*AH-9>?*AJ2=gQtB*fNnL#Am^HUbv8c8NQ(ye z*wNH!h8=Jz0-i3e#IjEkKq@wl9j|a@d=%4ROj&>TzKb6F`s?JDrQl+a(B{P0EPL3*L`sQlDAX) z0|H2wlSC$kJ2JJHtR>k@P|n$2$oHKH5~LX6X*6-_w9P;A2CI$(1>|ihQYJIX9Si&Z zd;iy89pX+wkW7Fi`u8gb2nXj8r)Tc>itd)5HZ#ZokfA*XJn9M=KQ5$6vLxBEGhmd@ zr2=YA%i0Qm<~Q5g=?zbPL=DzN;$K=K=Q-g-gq)?BViEGFeVjxyXuiN(xqF0fynN^7 zFMX%x=FMY2$Ux+U>64B9h&z@@IxM8WJ+|xkh2;1(w||!8GGy!9X(T~;+lnC)i0JMg0KhxZ>y2klZun6EgcZ2w6@91U8>L2*0Gfz^WsP&r(3LIV z%X38_+4nwnAH3+%f3WVh{T(n-|2$7`0gt=D_ockP%uMkBb9bP;LXt~PRBz4eHJ3e1bk0BJ!6*IEQ$y8-Spb_8HAcG8aXmmtt|*8I$-`oe7+*pyr^A(0_GcS4uT%Qc zRsv|BL!$lMjxG?0qCC3y_kC^e*IyIj_O8=WNuHejohE?xdBKNL7(YDfQH% zdqo4LKn`cNoXAD^ss5flSG{-RxvvbiHcgtrA<7Tzy!@}j-Y*;b4wDR2+Qm*w31GcO zXT!s$w>|&}8TB762&;Ym-IaszlGKm@vOcwU%`9*kh4ZjEBg=`TaMX<4d*C~d{=*G7 z@98)qE%DzXe7K*hm|anEXz9RkkqlrI)14-xNfIE^2A;Z32T0hk`Xvc*iA%DX#pr?6 z!O3dl>z=jo;pbT}kdQ4BkKtQOaAdnIQkUu-3 zN14=gH!AI^(S_r(MWJPQbE~u2;pUVHPSySX_x61C)tv)hcR6VQMra0+gY2;X<$nh!2q`R)z{LV$pP0Eq$s1t12) zTwhhRYTbQa9kx8Q>y^w#WqAv9rl}xVX&YO07sI ze;X^C>?;z20I29VT$YmB>uQ}&P!u3Q0GMTxR(enz!4^jV6cDQ5!s;G&{_%UNnW{OBUx`#||I1lQ|2&nx)%1Gm7 zV}5%kU?c^gwM73P!>m-oRB35eEwoO_X{xF%k9lza)MJ9v2_QCiMgCM_t4K-+sqLwf 
z;3P41@!AvX)4ORi|n{-N*O^wrnS?z%!!VU4xvPxJLPdom1jzd4HXe73jf zrdG=Lq+&8K_Rx=U&z=k4YJ0y1c~y6p@VvgLv?V!)CXfIdF6_fcN@br&$b4erz_t6g ze}pm0Yee|`_T3k~t-?I$P0RoRP5@Pe(|YsWnT_{&Qmm%v5PK`3edtPJRI#3OY@)sUvPH0xKJxd4iq;m~Mgu0D>fF zXdlwQd1E3;`eJ;5h}wM9-i51Q^wzt+<@r4E=+WneQUjM4+ zzG3F`mq5`7NiYJCqKqU(z}2(?kW_T0{&10Hcgb?vk&$dbY4y{xforhP=b^Nn zEN|GZGrSZRNji3&xc=04{ND5l$Iv>F>HyK)WbOS=#>zN4b{XG`f7k*WHIiV^3d|ur zDAw-5)g$^ibp5U?-W_iHf^ngTwd)@A)``_;4)xGMN+s zxgXkl>D&DJe=#fo1Rw{f^k7UrbtS#ONJ?0VlJ+|VNdW7OKepwZ-`#iJ$9k7vKsw-{ zg8|6JGJ{9}jJ3LTwPwo$-(1a{G3Mb~HUqR*i?zRVm|D+9#j9yARE`u$IF=eTMXIDG^6_^)# zgeaz(n{_tw!z^=OthC6r>_y^crV0`T`TXuHFYp^aD)yT?qC$-EQ7i@zyFb6@l6M`t z>I38g8392yP`(fVOalFxV;>u)PhQ@9rWqJ1S(rNQxT#-wNdJAG4K8E`SOI05_&{G50xU!n8EPlOnIu3YV1y(H z#^zc{-a+XEpp(+Zt9D-g+`l~IZGTW5y8%{-#zA~>{%>=n1w{Kctia)gr!TCcQ8NOz ziF7UNivXAy3BqIlhTWI{t=;(1>3*yO!5VT`G_!)zjO6l^zlA8`f_#%69L;gCER>YP zbVMO8ugvIA;jO*=rH4U5Y(AlIU9`tC3t0%AUey8$dqbdF&{gW3lnTd;G$G_FyX%W zirX%G_FtaDO7QO&t>n_^&jqNp$=~9K(SO|tA$6bd~pkO^AK52 zKn^r|m(qY3KzPKuQ=T5XTPh^xtSx|f89T2)Fjw09ZZH;5ImoEJSkRsrmBoVce$F?W z0Ltsw-|@lE9emmQra*H!MQdRGH1Kn%7Cfe<>!mj>0;H0^g1+CbB!Ccd`-%yR?*DQ$ z_|SY#1ZIDR5YP{|+TGyvmZ)GW^pza1YnE?pXZv555Ax?a|Qs?0Je_?DEF%TF3K}7 zQt?-;_h+^}7y)!+U+tP=PFGg(KMpC4uA-OPW2*7F#Vt z!BiGbM@RWDU^tKFp(wyWhHBH}9+GWr4aq-g;XF z2;`v2=MP=9@A7w6yT3IR`$Fx=hb90LY2onZ(plc7YY!nXzA4_#O#!GO^SFQ4=jrXh#M7q~!|B&B>AE zl8lVL_t1_1cIc`PPxWrpX8d-&z)`9qT4e`K#ZVIrnZ=s6eL>&BEyEu3C9Aj9tpE@#S^ZLhdYT zr36tx6e?FgpoZ$XUH`M^;sE8>uCwo)YdN2XdYs13UUeR-F(}3|AM>ztMXa^VUj>=~lrJ+l+$8|A9yBv`_6t@Y`#5lF*_yhe8%7ZsrMG~E;vTo`nE&3h z-?;6y&s_hodsjveW&qS!l*Lh`+g9Lpw_Rfe$$A`_ApIK6g9rt2o?_kKyZhR|<+UG~ z@B^e%6J61%Gwxz(+6>4^mQq(YWrX&fEEKU!MQc#i3V|G(kYq1f72eU}goEmDI|zbAvbt zfKoCyHP0GltrLwF2c;r#`l>^{oBrVK^MClHZI63c1?!tS;F7_v1mPaXm7s9FAW9B& z#cP?pD=3Iwi$Lt}|JH#^-pPI6>Yy(K*HmOAB2lHbKxFTT62V}shD}j`T@^@a=uydVO{1vOtS+&`3%E0U@nqQWSM~g>65M(=V3%$9&@|_Xt7X8pHy$#kRlQR6|`Zto_)nXT=&L*?l1H@ zuG?omsj@MqS(>@t#>t{3^dn*wcOJO<@ArTGk30Lm5#WFUUb%PFr>eY80qKJ@*9hgE zQsx!LT>rzEtp9(#R}YFzu&**!=IUb`THh- 
zs^`iAIHR(YfSxCkV{+g(W_6Cn&ql!j0uaPujKDCBJR811geb&j90PzTrQ8&iFi)HR zse7IA(q~}pOhr>52&&NM(fo7D!a+GVTzi+eLY(BNj-C#8uu^nFlFwfCe-TV!$CnHX z8J*%_$U+)YOgL-E`aE{q!`_STmPS_(XQroug{BSAeET??#yj#z1gVAxKXl-x|D3(_ z{BC`-7NmTqJov@ae00gic zWm1l+Dd*E(w!7WajsZR$&xM~VW#bjuKB!mzif z5tnW9h8|C6hujv7eAlIWa#utFL0bQB7hZDRpPlmBXHVbtF2Msxaa1(Q!u`v+Sf<)# zo#~iPr35O66YTe9cYS96W$&Jzy%ul>ki~WmZkKtJOkP1_>Z!|zWx1Yahs(1xT36|n zR}r(JKD6)33;g?se+ekJ%Ktu$y(z0`o5=Mze5Hvp#!lzt7eD>VUwrwxJ#&N-ii;96BRQHr zooa(mP=MJ<3rEEo(zC;+S0Eq~pdd8wG&7}(Lf_P_C1S&+H(&XyuQ~p;zp>^)XN6D& zSR3jyhAde6wV5=vurv!)uceV~>CKUm(HCyqd*$Ep#!pVheV{u`fB~e$wNr*HP z$abM5ZRF$jyjLNwSwSD&K6jWTIUT)v_NHBzys@+U3lI&E)j$BChDI*$xkq0fV|$cz z<;jYWODbXNq$f_SJqMfs2&j;jxy_sxl5-qhyIOe#HeZ<}JB+7I*)E!9q511d`?^-B zCQd$f^KU(2zH5NF12Dh{khK55EP*ggBUy&Ge!7yt8@%|eB$Vrf5tf#znSn?y%w{Hu zAO*M}b)xr7E{Dg>JLhkG&YN!e$Io)19&jjf5%FGIOyMELA2FBuyG&{g-p#jKh14kLN0B>8xsge%F_GfBg@qcKx?S zmq#knEwMEu2PdMzXya-x?xX^G`iwD$g=2fNxBjekCq3EOB|$nMpv0Y&ovd!h27o*g zxR70>McU$WQjwN!<|~IcSxa#D`T0wq^H=Ns>yiN0R3*ufvNjs>&N~1nNW#+Tg;4-@ z2%sHjtszx$$PD`uI{^W?lDv{cD!C}YNatK9^q=^kGhX;Kt(mMO20$Te+q1R;fR;AQ z@qP0pjmrI%0XZBpCm>M(1gD^S_r7aBI(PjCX6n5)!RYBh!7~$MNNd<70VGp$2s46` zGr;G&cisA9?_{;9x$>LcOMi|OuTThCFZlkqHyC6xs!00x?^>YO2X6SpzN^k()!$i5 zrW&N=B!$o}o+EL4HpJf~<~Q3VXjcrM_!AvR5@KmK*@t zSU=6&kf|^X9K!hXlYEg2tYt~(P(Bj!rHdLefB=OU)Qi1+yDonhZu-^;4r^b=bk=N;zru-QYo8!A>je#>2c|EW8F|6LoMP6R+MkYHo0 zu&KWr49~Lc)0kwzA~XuTa|2}ClUblirjq1kJ72lAX^tQT}gPMsHGGsYkCD_FZ(_#c!NCcp21>bOr{W zs1Zq$Wul)`fP(!aNyZ#U`=4xh?PE=vUrZDTQIJ7>b(|6F9=!3C$B7dHfe2W0r<++D zK&>(z7=vyUZy5dls>RkVIWQG)+>T^c(sKPyQJ`p{v*G6+bl{5@_CNO3DhdH{TD~xHzDczQ_ET-s$1VASCg92obcWlkhgZuvAo%{axq7$C~lEMZJ+I^XF#<1;1%SK0Ey%PV6q%U4|z#H15io= zWQDOG>8y?IA$#*L0ib&y*4ux>$LFrNVA6L;0%=aNAI*V)MYfkC52xu_m)ceOy{ROk z5DVLCw8@EC{mlL8C<=hgNuVz~VdFiY5xJ_8ix2~~9FLu&^17E#Ros14zO~HXjjBOz z`ZhFcKAiS|Sl2i2z4*y5+j_&ENbk7S6)Uf!g*^P7q#9XFB`4W_EC5F&c_mF^!A>BF zY8Vzc2vSev5H2(KtLo^eiu!w);5P?8~mAaiv{ZDjKM1)MhY@$ 
zc46mN_g(t-iPakrdKfB=+>6b5+N4nV;W^4w%lCDv4^gBCcs<{rgXwG6hU%;&Zb+bMisxni`iW$g(s|J7Tb`TF%v4+!pYI15}}%56Gd*qq}@7-gaZ z4V9fC*T$=Fz413*x9Zn^=D4T-T$rBl;FU85r)*d*$ui1$OQxf}`(yiAGSE#BOi=IU z-PeAwzV^ctej6~==seK?%ozyp3gtaijvkrnwBgXr<#4uTdx%RKQybtGO@R&((dXxH z`t<%QF6i{OgPld%YK3%9D_0~51mFY#TC#6Bz#{0swag75D#hUw6r)&2D#9kW{P@hV zkANFE0tCvuTrDU2ZMa=!Crjx|8wd8Tzuk-pE`NwNu<#Ujw_{>?U044$`+$S&2)DX zF)keX+P;g<@9h8XggZ6k=903TNkUqp)n%%mxuzQ+2{LyR%{oIG%jCHI6pb1rUp6C^ zB`R}s5SXp!Z{BmoyK&Q}JLp$#wbcSZno6eGva~vO!YDZbKoCR#q=qgSA%(sdMQt0@ zXE|k(!~3f0>C-md91MyYd?LLr~Pc0nITag%NXx>ECHx62~N@v9=P#?3s-z_V&S%EwF@928`L6^|5>uT zQcKg1(7kWLoREu#vdM8+UYDa7`Xor2J#_154qW-Zi9_21Ji==OP&q_sOrXHB(`vam zm3;D`#QO_iiMWeEE5^l{XX$V_+=6vhZ9MBas7?e89t9PSt}(Duj&f;^Xxw}NZM6R_ z{h(LKj8+z<-F@NOTdw+*S8crcfQ*1l02v8`rw1Lz6fI$qz1qTdh6GKFD z%s9^$>qLO)a3dtBjte*Kz5LzU@#)HEp%!LC2aG-vdXHov0Qmwc)l=tfe#o21H8^^u zY$X!=L`4|jE}0{m;JvtT!=B6kLAQLe@`dPiy5G!(L;0qoE1!>28mwlhzd?)E7doA^ zVUt~?^(L$HOkT~9_{KI2NL5In91sPtquyMYm^t;i(qtL5yp>8#7 z(o$ya{6hLO`@+P%F|QA7Jk=~NpHwyZottj?_1Eus%mYt;_T#IK>zsxlicW+YZfRka z(p0sC!7A8A$~mG_kp)xjnw{VE-}^7QAS_%t5k0a7IxUO}ghunTN=6kj2r(HT$B_TZ z3WR0WoUU8!B0zEwg1bfZ=4NmD#Qv*3)UB_n>e^8a-*L>oWRh{V!pIT)Ae-7$lH+)M z5iHoxhxR0G5kcmCOoj~)nL7D#q!C;+kT+&{#6FL&%o(3Hw!f1tBe~j#!?iaBAK|ZyUhl>WKHmSUf2qI#nCOt`iKP`ok^lse1PDkYcdxlF1t21qjq+^r-@jyJl(a-c zB|%^|C?X6IHav?EMv?%W$cmZ6fSw_O`?syx`kcpa`MDp}WT06P6iBrqN$o>XmaY0A zB?(?f-?9Jl_xEo6*ks%fS%k;H5cvZ$jzJWmQPx<}2NBUN_NVT*<-u>Zu)5Ig0!U|h zLzU0Vyyl{HJwlWycWA!1=bJmPdS7?4lDk=#{Tk2>hkudq(^qHIP|LV@faj(N7INq4rH$UD zk~AvaU+HLGl1Ii9mR4(`bL`U}b?L>|tor1)6a7`rgk}-|L1K~1AV`kQy9t(E5i>R^ zVJu>BG>QZv6%*18-eBg^a5%_RXpU%Lbd$Oflh^Lv|I&Bw{MhI3^4wooecl;?K_ppv zQ{zXq3_L;#s8jR%|99WT@2vJ;ILV%)a+_zIsA3cf_KCv9)}s>vl7_LDeKWqa&7Cbb zc?1k5pX>K_?78yqaPudp>pgHpn^5Rc7G!a#O-mW|XHHBra5Nv8bPT5O90%RADUuda z$qOW4B%rx!PyzS}+*^xtrOZO4ZLl;H2k1F>KoqLI4>+Hvwc6jgc@F zf)!c>zA3%`E6gD8P<3S~mieMS=j} z1~_`g*jx4Btv~VxaAqWjh-IT5m8ELQ%_vA9CCbFU1l4;E-SFu{*L`_fLs{7 zBb3{>jHz`S2EXrW;p?UJ)6gv#EVT=n;NSlW5#R64Fe@GzFu1tM08l1EbOC^igkbE| 
zS~qo%=S-jQ*uah$sEqetE2UnduRiwC z5gjU|6LB2=o#ZkyY!|fXV+Mk?4^FDu*Hu`xX{hZryscIPxtwL!enScdIDt+zb^3|h zUioV`|L&VN)ggKNvVcm?UC91SZC+t(t$-#TGn4yA4o40I9_$V@9G z(lBBnLf;tI-TRokJ-P05D-kj>%9)R1xuz*DBWX(6x)OVjyw9p<{pUJva$39NZ1Crw;SRTBFBubR5brZhG0m|SV z%G^+Ix@GVDn?HK-6JK)k`#)LF&Z~|BoG6ziGg*L-9m*oG?~oR5+<1wuT6N5>E<_7x2TL|?ig36(IBno0~5ijse!kQd8mUY1&qhy)BUhX>|Eg1`s}`MXOl zEDj)eetRlZ!p}#pS9Ks|L?B#*6A{{b!iG&x|Cz0i{1Hw~lBUXnI}Ku>()Mj@2Wl2a z4OLWxLt$ZI@3;3{{Pxbl3xn4rCAxc#mYha4EE7Y6PFaBnHpz5A_O1~X>UzyXH=Xxd zvzaknsM{+XQV!50G&%y*qoTYY4_&cq`$xmApQj%RtRXrW1PCHrTs&G3;lq-lLzbM} z@?X%tz??qV;=XgYW*5bOh9IW6CfxETDFPa5FnRo$Nv3SePr&M z4|Mytb%8zvWC3kyN=Mhdjuy+JC0Bx3Wwf#PWVzjD05e4g1R3e{dFjx?&Wrco@G;-< z)k*DhfN&^8b%4GL6tt+`Nz0_Ug)=6DV{=p~W}X;o*c#1tqEt%;I!y?Kpr*ATcXsFq zGKdhnGxz-EiRt?jK_YGWgiBKSoM@F(edUwGG_1Y_FQ7tZsCFn51=o&|T!4-qavnv% zqvQ+M-FoHIUbF6|eWaWbKptU8ql)$AyuT!n#o!50;G!g-ij|3%lni%yczconl!A=t zAYlTR5aovHNm#OxWKJ?^0w_U%E5sPI;x_Q6vaU9jJ686~auH-M0u0tUZtF>J{OvvKx&U+i-EtC8 zBqhwStjGcYHdL_%!Ya*{zB;LtdLLdakFoLWK zwm@3KH5;Ln0L76a1`s3M15E@NM4H39;g$*O{o;e}|GU3xoeuSb5&sEa)aOD3RLi4! 
zSMR>`@8fO%Iaw>Rb^scfuv$g4bAD%2>0ynjE{e}ingEa@oV58(KfUGLKPD$zis9{T zW`Yw~RBRA%sRkUHs}Egq;QCMXZu*~&-vkFhx(G+AlldYf7y+g??QHBaW6p>Sq4J)h zaKRdbim0p}A{A9gg>_jjZ%P17JfEhjM`=andTr4u6Ka!K)$8uR<=!u`u$dOx$b98{ z%F6IIbn=LizOsf!Wj9bsWpJV~HE5}xkwYFz?_T z6Mx|+tW!}f)p0?LAOJ}VjJ~k@3wtlSVEVx2WEC0QL>WM-dBm1yhASjZ3!q>Yh|#2q z*2ZuXL4i`l98k0Wy@R)WWB$f3YTxBk*k!0Ax};Ic*?dXJ=h-Ncgz_K}0?N99wxz5N zA~-5=WD6rR2fs>sxpM+)lUu(NyM)zKXKy<9`PNxw8mu-f6aALdZ~Slp9NE$e4!Vr% zUC7hjeB}lFkRv|}W$<1}kub0d-7Qb}@r9jx>oNBCt>L4OBBXcH?H>Qk$8O(qfFJl$1t&;S&~k8{$kOWv+jw53 zc9lFLQiT+hWPhd&?-*P{elQDMyxEX1WubA>6R;=&5$c&{k6FL<`A^*R(+^^ILJToj zdm*3(jUl zc~`?E02}ZPs0{(?Ri|t?`?qcKbfE&>q!wKNxXBRP)0rdZ23ihIbg#?$xH3s9#QhH1 z@cEw(n540kVk@sZi_tYVf7y%Qy5>JF6c#K501iSiA8GzbLRZgi0C}F(VFZ!V63r`{ z^h_c5(n;PDHe}vBOCcb#c>t8RYm`8Ms5~qfCY*x$h@sZruDU;SzvF-B7bmu^uPb&y zL)G*mTe2Q%Ah>9PTSVWq`{4EeIeW$V-MA~AIzq_mF@b7$r$`{VlqhJ4J=3zc=|7ll zmn~N1f)OA|fhnqt035XXm?vyH`$a%^7(vglo16e;1Vvxen3GDwiISv4e&~inw|!&w zmM{7KtEadJs?D=CVj8?6U$QNstA@Ms`sR$Z=-5p_y|6D}hO=0&0KTviw|W z!Q~LG7C0Z)tvmbK{~z_&1lTqdw7GF49rk2d4UN+gVtmbPAmVH_a18CUvr1~p_#>gl zC&<~OUYaHiNe;klV%4g>EQhQO$)bNI=}DI{;rEA>ir1&rycZA+iYz#H**Fkfly@Jk|2`B7#iS4aQ$$ixeOwex%Y72${WbA$`H^Elv%g0~gnXKo4 zNO&IMR|W#pys4J&ojrI%+Z~Jj^5|Bw64{ox6 z)>*!}$?s=39Q`szPT(TOZ`s$a03dp_rlvi|;qDL?Zn*W5-+JAa%XWx$N9+Yu(V{t! 
z*#${SB9%)3R9;^?`Q%DXSiZt!gZC*Nf+a$b&?Y!+&twc~o=c$V;{(Vg0w5$fR2%3W zpf_nd*3WGF?Z+Man1^7p6WA$WoMmiy4N6YZ00^Y|y?x)_dFi{m2QM^VkjI1@V`8Ql zv{a7+0FyDGK;W0BUrtJLQ;*>&B$I&S0L-tfHtlLNNh zux|k#qH2l*-Pa0Z5z^KS`74nCu*ED1WOl1K7J7SbbO;gAB!NytTrk^fsvdEld;IP% zO&qg{p;FuTC4i6taMb3(ednQT|9RoMkIlr}5bN3k6S6ge63Z&lGB&E+xC&%#{F06( z$tw?JKY9>*5}*h~$W`y_&)?QRc*FdmTYUd@{X^H={Ei9jhZZK-lOoL`;1Xeyk>f!I zH8D?FT*HUED6tWxfw1vihY25BGOk}4_dh050OqfheOTI{66E2%bS zPR$WuPPv+{(9F!=N>e8hOlN=!N#rF*x+hWp7H_Jt6(nWPN|KXRBpr%A8?fu-4X6C> zQ`SA?oT}6D5C|rqx4}{0sVD*@U8>=AedxQpFL__J|C=53V44Rqui2Fr z)e0N{?Vw<2kU$y`YMUo$zPq>Mi@n)fdWUvk?v{G?wz|K^;`{^`I_O9CEB1tr5J17r zU<4y-GC(9DA~iHz@uk$g+%OJhdS^Ny0i+-{-ucLURa>G)31DJc20oDYM+OQIsBtpC zG8>!2sW2hU+cZyq+S=oPF>pp;nU0kMbjnFun!-VQpoEn;mB&A9s$stT<78g|kw9+0 zX+O4tHT?Ei1d4&7I^~#?-ts%w{N`V+yM9klgqE6E%Mw5$!I==UXb_zh z5r7(yXf+^;3V;a?)1khmwBn_5tnoE)Z1W6{4^rQsOd@)@D z01JzXabJQwY9s}?MC8{AP>@8inbVkLj4r5a7Tw)_&Uo;QyPW(-PhWM;DWR&!K#~kU zm}0s_icXiS25R+p?7QMUe*M21YeEo305A*glI4!`fYK}psn9accNk!qw~u8ZDMbt_ zVs3ZwfuW&6E~zPPFjyLk6Ddu zM#nTGS+G`?soY3ZaU~FrTN>LdNn^T8j+fW>=mY_bf5=(rRChi0w70$J%ICgu>-L={ z)$rhg(on<4?IgwSb}07*5mXZ_6Dr$6$nC;wDu zdK%v!4@E;-QiX&+<^GdGo}fOUxo5AV|#PtW#uDN+APquo^*0=e(j4 zv6ua50SE-sW=;bU^O}`=?&p)XMCTa@NDZ8h+paDQ3 z60)Mw?jGG|QBpw9(Q!nuj&c3u-Jibd#K$@(!|2+Tg2un#bKG8K)JBm`^60X*)kp-l zd=fU#fPrM9eEi;iECJ=$Pok^PJ>l59z3um|c-|YfUVLk)ESgJ_97b||uL z;%)n{`A~i3hbOe7qSsDiS2BU114y({%6+125D5?x^zVw5_JKwEF6T_FM|Z;(8%qv@OW5If%N_Vxb!+b|Dz?nE7RPF5afo~ z*=ZGmoNcWUMc*-7(Z75Z7{c{+_D>Sr1`1a)-WNG*n$uZfeLI7$ZpsZE! z)X^>FNkohWO>p9*Eq8m{i!OWq8#i8fBLo@&Hqk}$Fg&pG{l{xLy!GicW-~uTc*Cgo$Ljg ziIEiQY(&lZX@(h*0s$wGZ`UldIp|2&8hLS>2tS?}YzZ-R$iBJYCPvDv+BL(OC;r3< z&wNaG?P}2s4bG^zZBkA?N2D*z-S8j#uDW2Nf3r#MQ4k;|WJ*%7G>0@n)&O&Cat{Ku zQZAn4V*R3+jh!Q{UPm-i5(Qe-oT~!oXtK06ocY*ei=0ye1tcgxg9un^Wo*2ieDXQ? 
zj)JbA4iN|#BW@!Ikrt#nIK^JIY28^bnArRfvsL+gmYwd$XRZr=K`ni#jtbaixl7dw zY02}p@xxw7cDAfPa!PC5i*+KBtVW>)l%K6L=JMN10^ll%3h8nFw`Ea z$VgKKA?!N$gcE=Nm)G6%RFQ13qGi4&MW~8EjOu-F_eB@fyZ>X_XKR>yR^MWem&itm z`WMuxE3Xe!(;Tv)SVzLgl+5Y^@8q( zhad!~?MTbVaAEnivKHLFOW9&!Ey;BIiPmsyp%zP?`>;9bxAQcUB%AR8tb(mwb>^S{ z_S_R6)VENRE{7J8+XcDgA_$kHVWAn4H*s%BGM^Fme1|_>I=fof9CVvL{?9BV25D9PGo&@B-Z1QJm83!&lmx|%UK zcXu)W&@)bd{?jH;*=AM835pH{K?KzHCAvER^>5pI_4^lY{Mdx=2V^lCV`N1ro$pc> zv!=oMW~B&#v?C3PMG;IBJ8tlbNOF9B>ZBQfYx(x=p4WZ#^|_f{cO$eB3Vc6 zK4#U{7d-KpN1Ugr=@0_jB_bvXxr0DW1qreRsP6B&X!qssu!G;6iV#C|Pp{GmNq}ge zC=589s08y#K-rPtLp0HgFfs}aj2EWI3p(Z0N`y#8uc^?3Syc}BD4drOHC@x zPksvtDSdAq=xoqdmg6BLkxDrJ&i*0@hgtqC(<1VAbKCU%F$gUB_?L@d0lnJHoON(p z_ap1ieqQLFN~^}SQ%f%?N$u5xf#eNmRjfTgdQr zv{0mSdIvylOtFQtjW_IziWV|fcF5xnqt^Z_cVB$I_J3ir?#0j*BCT(3F3JdO zNe#j@`poli0LEp`KaxaLph$r#I9Fs%n?<+ zG!0QYdSjwWF|JE9i-{7(Nr+Ti1ue|k%=D>GUUlLV%+?TrMXseqHmbtSZJ%o93~1h& z?jP44u<~TnMs=8NfdgE6yp>Jnj!=f>h@cP+fYHL5bMAKfd;W0GxyRR47O2vGO$g0= zlO#C=(uag36BL~SU5I|`bf}*H*t6gF^7Z#Qi<47URfHL+#%WYGdx~^c!4J&d@UJ_+ z_IusEpPG!dEPy@pK%sB-l7}f#pq3h44G3fyl_O)Bw;+mT^mU%5SuN(L)eTacWHn45 z(nteLGC+W&D8+J%&3cBd-T)e4Kp+9nKdJlW@ea@D&T-&rCDHr6&bC!&J%7zfPgdAS zPUJ#Kc|UTo6zEU$dSQ%Z-A2oF|91czKv~5xkn7K`yP4{N#5hBcjJZBPmJ{2~Zijp$y-@R#OH_GC>RjMMIeA+>)u}|lauaSd5z-Q7$0-v@IUtAkuGw?>JNdoO1sq(o??G$?@j3uVOfba; z$l!30V4ZZ(l(ohzQ>LkKz_M%~`LW1$ii=@Xj4k3?EW6*cJCmoYLm{Js`3{n#fe1H4 z0wjTCF;d8-2hsp5VCgi@_mpZa?=u!9Mv^gU|3xA78J$IoixWWN?oCX?7!7z_4ZbsF zw(bN#A;N88>eLPAJhxhRzu+BMRo2Bf!YxO#R;E9f?^Ig0bS{tD6cVbBrK@jd1S=yu zmJy|gK|$OeAb<+}DP*cSoQ7qHA^7GscmIp$Tz}%f?0)xuZdfn_%;l65;1ZZnX#gNd1l1&?%GLUWvL+JQ%C^v`%YYE2F=D~SviAFuelDfyGNzHEfRgKi7Ex1e zNW#KJa!GGSO($%$7Te0C2nzJ9W+>~V;EU8*PymvrfrCgxOD|H(^q5k?0%Q~*dDUNk z?uL84pfhnYF+r1BM4lFOHLVi9kA4_P8iTTp<$Xl*aIwFQqqTK)`K+};!)RgqJQK4ctocP=)-g@_wZhgi3*6*C_f-WZwfR<)pPq54?WRY%4V3jCGEgb5E z@T3Qy^Q@n*Hm*~r3Lsq|B5Bz>DugU$)y6(U=6lTaG#ZMW_GauXHx9 zXH^LxAtT5z`df~o_Fa7IH(%!0e4wMf6acdrvaLJ(fKLuf83#ufMM@@`m=Cb^I}${| 
zvJWv{DeCCkm-G}{o8Jf-WNFl?p6ml@U|#Y>Og(*u?>+}eR795yfI$H;$e_iB6d;of z2-!fsKh<)BOp>Jb-af+v9@7=&!qXlskxMzH4=N?XUjy2~U0`*UXqzNE2%0(G(E?(p(*E{bM(*wS*Bz(<&G$={oK{C z0oahW%@*-4UKYtowsS6I5{i~olg$7nM~d7KAOc3KC6@#c1`=?AB0?q@g(2QM%`Ao(_&+86TH!T}PqLd8meo@$YjAF+fh19Jdh@!w z{l)XHf6vEvU-0i6=Hi4Hjpm4fgHx+Zc~wa{M8M^8ESN2vclR@X|CcAuI5t+S%s>`G zo|=muFqx!Us0kA4&s}}s;`6!di&NS!&3IjrwXzL7T2UJzHvoV@G6x`Q%xalf3xzTo zF+@4nY*H@@OMt8Y6HL<52= zHv1+m;Zo(1i7v((PJie<&iKpU(yD0>7H9$?4Rd!J($(k+RAqwe!0b(*-+#s1rxw2F zFmcM|h8nB}?m8MYq~VU zQy#nFl&8}+c<5Mg3W5PD@aD}@u=h2v(i{%d~#R-~P#=2cC*x0PDadI>aS&q;$~*0Fs%hLiqfJ*SzY2 zxO=xDngkL?5ip8qQz)YxI?!LZVb_IkoW1DJCg*RKDD>z~)De_i^2D1r(67pN7XmCV zbRh~6h+rK3=*>Pu3kQD2i~JzKMYx-gJ~?|NK^oMtCc0m)cP)=4CDjp;7jfXhklqu<7JC#!-3aM6HR2&yQQBbi-Xi{*A6 z?pvN8x_cY(ZOrKE@*`yd;tq|tRF*>AU9+=S|Lw>77yQT6d>;`;R#59M zUQWQH!?pKbC!YM;XHK1TZ0t^2r$Z=6fXFaBZX;p(JAGkx_gD8_^5&`8%R*GNNDmH6 z4~hgxQVt^b_VK|ApI{BYdp0=MnBeTCA#NB7BjGnlvBAYBJTd}oVEb6k1?MWDG;b6$ z9eSJXZITAq9xU~NhA;$j!3PQ)d9QMxrF3Sr1ago#ToVh)GrJKXqP28h?gK#=X$z(< z)N{_%o9?^ep3kYKP6J3(K(jv_(@CV9k9X8`CXz<=JRG>`jxfquIhAiUzJ{c%p&Ohr zc>Gv_JAQF@GmA6RCqMJ?`_4LT`>Wr#`R2Xqy9^FDw}kJUGlOYQx3+Jsi+0y)a zLN?7VIz(_xRS%bNz>3qlb;dxLBCq zNFJ807pZ_qb;P=7n%>w|VE0mk-LZ4w4vS zTwb5bHq~;B-1tyDh1akG+W#@xfwHLzfaG!EwmsWl`yPGnyKDL~d5nNS2Z2cE5>I#~ z4{=F?P(>}6_OG7Y@WLl;d-5aD=~QL}i`b>f%<86I9d{hO;=}dzADWE2A}a?ZLn>z9 zZ&t`zD#4dBJ|amO8y-IC#=JiwRZJ#MNoIy}H1BXoXGsmcxesF50OZS0$i0=AY8#sU zD!V^qTh6oTT6CuYXpN0iw1?&J3=IjOb!KD2J{e1H^XB;61k`4jmU(ntVKgn$jR{n^)l@QSN% zKI8YFsx{LPNT8L%qQkIx&E1}IaP`)?%g^ujZWfWl8f%O&y2ODJ)`TJe2rPk7iv`}I zj2{3Q(s#&Lk4lQz`3RfaS(^b#A7S8IE3d2Kku0;jsiK62e@zsl}bXT?SEs)inK1Y`x0D*+Nia(Rh`B^B02duDN^ zo#4|?boZ6U*DT}yHy-)nF9SmpNk$2=p1u6KD_-@U)!({$q6VWx8U=#^br75;5Fn{6 z)o?VDG&2(Op<1}#T~Geg-8Vb{0zU-q!B z5E!QJcEY{h^#}IsN9~&F_9-YjB_KkfrYQ^RCt-vctTI-{4rEa1My&du?c0Cjb^E@1 z1>GT!go(r6SN>OJtEM;p=%)LXL1U zN>Csf!u0eB&wBh_-}MK3?{m`r4iGWXle=7w^!@-DMS@177(^3;7~mp&?e<+aKliV{ z_mR&K*7IIbZ~b}tY9J7vJO#=85iVhiNTRXs%cn(w5VyRZ 
z%p!?ZPn$aZGz?B7uVG}4XY{vU-dyEM2&Pe$33_3>6+r6CM_L&?IscvzUL2uTHQon#xp=k^0{k=wa3IYq&pWb@B*IqEsjhk;;Qn{FM&;oYHg z1B^}%B*0*9A#^*F=bU=ZJO60%`H#D8U1#2eoJf%}3J}dnSpXe$np|~X?(S|rvv2m$ zt3Pn{YyPgDn^$y6KQJ#_h@EIKpR~#2H=q3?JMC9wt7Vlu*$GkJhg{~WNJJdNIB^sU zX~W(K$&uHmEPY1GU*Cp~EE7$OEp8q!e_uMx3Nu!|?u1iLI1L+qa`OZJZ2H*8AWQ+3 zGdO#Ym1`&ur_58f#2*{seT^SgezZ&Zd}WMrOqc)iPX&$JN}^=k@fAu01g~SP>)A`T zU;h_>-}%N>(+-HL*a1C|=nR=R3N8W$7iwVk-Y1^+@?Y;9yCrr)CBh7H(f}5q5J~mb zpS|&8bC;YS;tr$JD@SB@j~w~tZ=suZG)f_Nt}?wpQ5Wu)&;ZuYJ#YXpWBJh@u3d^G zo9*T?2Z8}$qbex^1lZ`YBNc{rvCEP?*-J?Rlsv_SkRb(!i2?*bZX6nh=8_uYYL|>& z1SN@-h#*0swip~{or9Cdt-0HiXSV*NVUnB#2#o6<(p;kB;8LV31vIlUGK-Y_k#1Mf zNH(hCqK)#a6;E+zDRVRc0Si`jJ2UsZ>)qb@htq%bMI1zq0+(XZ+cLuU?9UJ_IQ}SoGx(1G}rYJz@2IUlzM}?faz5#VYT%_6P$93l`!8~SKsAP z$K3zVS8sicVTSCcK^%5p7iDEs-S-MP_}$?sn@L)B+%w5Cv!6)+=%%?J{!%s>QH$5j zs!lh|OdtEypE>(Of7W~K19wkXy~=_LRwhYqAmu=0UAy--U%ThJ-+sgHfBF;-&BE(e zZf%sIwxF4H_g{biKdIN<(=8Z8LUPF^&!UDj<{oIj6w79}1|Qc};V`OlVEFy%a~S=P zmFEact(0Br z=N$jG7aq9#*4arV--)FrSqK0W=>oi>yMffYgR^^H^Y>T2?mgJEFUANce+vV?gFqz) z8LH`fY`f2^HJpvUjkg_Hn{Z(cw--P=2MslmSwI{?HBo@b7#7 z;Zy5(_d;}0a8yzaH-Uh;AUNd6PG8buboqh%opQ=6esl8F;~6@j0t|!z5X2B)X~F5} z-FoX47wCo$cjKIyyDCp{MzoCKU7}^O<=Fs8Iy4NxF1tIDgb0us`n(V!l9r|GV#`dy zawl2#9MA73yilverhOvgi#?A<13((Yd;u6;$)%m1<n7GM9!RU>KZyIly<82GKI+>FFaCCJu(hKFR=OTqsDG8 zHh^)SFP~<)v~{PJzfj6Nj+r&WG!>?%PW<)9o%#2#TKM_<@0&6aP5=zBjtBt`NXQnt znN36**uVw-u*WwPJc$U8ILA`pcx1dD#NO* zbndxg9s;$O720o6rcVn@p7fD?QIB76>nsjXDUHm?a!CoGcd-oPvV&8gQ-l-sf_3ND zoxAS7FWqv^?}n*+RhkLvjHAaNGc0seQf_7|#E%!4l6aUY&fT{v1fDka4DRas^RD#>;H77l3ZhG6kOWry&e~oJbqLBavEKkon zZa{xd2q{=fHD$&&YzNG6W&=G+=PZTs^)bfopGc~R^V8r<_s{aZ5V95 z}R@Iexd1j8))Zf zqkvjQfK=Ad(JW07N|H6t`8%TlZnrW4NxGR31qcGz>1;UjjJv)0_qV?G*LR%0an76( z?yl$*;L2Y_H-bPVlNmy_dco^A{L7WEIKMx)02jze4wFSp5^coZ29p`_j&oD zne)h`!SLpT<8y7JKikZyD^P7)Y(IpQl31V$Qg1;D8Kb1Yx0a=#D{t)`Pf+O0bx*M~ zp0?#j{(Sm`pY^cGFik*|NGMADH`2fbi!)uNDkXZgZ z;{e9xh!ck%Jsv!C>qkC+%X>bt;l|y;5#e1}jmWABq6i5ffna7eY5#vAx$~ZPIsG-y 
zoH*$&5Y0>(wB|rudqW*z#9Tdh%WW6^b@#R}1@;VZRzeU31wc8e9Huvy#4?5@En}ZO zk#(l{tf7m}krmO9in6v$kVzs#{s@QW2j&n|nN;r*q0N%DTw0=?G6l zgP>0-I#ThhUFOeP3rs2pf+WD52p){JSUDqQq)V6&n@W7wSY<2tK>Z6hkW{Id*wV3pgn;F$CbEmk_n5)svP{C}~ z%rU?8Q=5MJfj7SYKX?6~eR$7YtZNIQ(-Vwt0!%bFdLYGA$)<0A@9N)t!?u?_ZNqu@ zB#nZbOu!Jq)-~yH!*pljgstcN@y;vGpTFs!r=rXwHN|Sq3CLOxwtNJ}#=%jzznL07 z6(4*cDYuji+uLo1paC(-Wg$QiMC5a2k`Js$^7d+nmb#SuxA{m~6~ed6TwskAA2kfllr(N(1ky?F=5@%@!FP7d>O4ds3oYHOZ|1&z}Y&Owr| zglc&{OidV{VMA>`cxM>!N}TQHPzXlThP5X>>+$`^KJ?o6|J#9&eQC|E1&{8CT_=Kp z1~b5dF$utRr8VE%d+Q6{KL6ZDZ++6k+3j>h0MZbpIitY|$R>zQo9_Osxm73byW(A4 z?yfLzpu-A_VKiht4kX7cY#}L~894zeX#6Q@82xI^$_&x-B>TEZDtk6}4Q0aj2a$># zY3huYI=m97h2-LCL`2q9Wp0LgmRqp--WyJP%*5Jzk<(&?DwsEYiVBM6mZwo!Vkp0h zHrahi7JhAMlPi2LA1X&(?KAMDRLiTk%^=J1DR-)OMlI!&6?6h_q*V&2j@x|K7e2N3 zOFwK0O%PJNv7EHEhw6pbf8@$bu0P|&zZ}<00x*Q^WiiW6 zh_5m9SGG#anL-HttwZ7(^hmUmnk;+SBel60sO00fGlp`PRFu;JYGr#@j~{k_Ch z5Wvb*2`-I%z>kk#G%62KE(L(-akMLiR*n{%Sk_k2-SZT+g(G)VTY1^z;!fZHm9sl3 zI+78uUoYIaw!th+6X77FcNw0dw)Yh%m34#_i z9DyW2B()3$2z;)k~%-A#h@$;DHpP0+08y= zD{4tS02?}S2W(LtUjqgJGSElTHjtkGm2DFnvf4c!JA<$35Al~Juc!AQ1o zRxH<3Py`)yT9L~ERaqf3Y!cJ;_MFI_j-E$Oyf6YFD0fUwbWT0?q(6GP|LUV|{Lp7_ z`ox#l?V3Y$I02dv9DM?W+(C9@FMRp3YkuX`$G!5`*WLS`ak2_9!T`@`nKja6k+8|N zXK%UppX|Np9rf-nx%D6h#!j4wtdWX9LIB$iszlt%T4s_(@glF;k$zofrj)iJsgg=o z4LV6EjeP}okSRnnv1jTL7M%0dI&QrG#*==wyZ-DLR>?bNkuZrsi)0~1>0i={dW99# zL}hjS7`Srdj^gMa2JT+g?~f{nwjllEuI4+>mG;}uMVYD^2#9smo&(?e_!oA5^uMNW z+`p<%!Uf4|NCv`Hdta*AkNek7u6w~R-R1EQBtR5oz1Z8c`^xv~ zrhlt6YcV<`afylqgs{{(a|m&?W>jh}0i>QtDnyn0_n);PUNH_xLM{O`34PHB=|k3` zHQ!5asK_LMjJt;j$~)PKHUqeJKN4%bJ8QbYtD3= z4hrzDLLaCK?0kWzPQNyNE5f@;)sU55oYdwe_a zoz~+*5dl&FL1QH76)3VYY+8H#(;s!hFZ|RkU-;^+AN*YB;_Fw}G%zI!=*Z+mRjt|A zpMBLoTy@2DcYVPxY0XUNghlTf%QW4dNd^SnF==7*rZawP@AOFrx4&a`JOC*tIut-i zO@=#RHKUQ5yp;z|dslHTr2=X^OO3qtOiEXL3{|xyB#o7GIZqJQf-OvJojCT#HXr+w zHhr3Nf?yEDN>xo5L_jU6!C}v8yA1PKF7OQuZOi>(M&c>Uj;(h@G9`)be%Q^{W$qwd zIa+g6)*Bwlc2}a#mOIBCs(X9BcF|2A{j9!t$;@mW`mw6zj3zkHaYT9_Hh13Xr@rc0 
z)d^crg${xniJ`1kx-OqRxbwdcT=J&rg_|8N?~4`$2L}7I1A+rtJ!&Of0t9O)09azh zr2(Qes;HX2+v2l6A`?0+0-3qQFywvN52Q!=U*eJ(YZ|4E>_Y^DNUDYcOzsoCwfCGk z?xCxXd9Zah0n=rF7)k9f7Ou1p25O(zw=DmSk33d(VLytze_<>wX+o7xZu?Reid*8K z<}dY5x1TW!$&ubyAmT)K!;hT1>AbUNue<4nfBEd5kAG>+jsw#UbHP{#m|F*!{GTf? zddAC7dHJ(fKlm&UMzCF5Q>!*Ttb6ZuJHGXn)pM7))t%b=p@zDpl!wsh%U$iUf(y=n zLmbtK}P zh+5-q!?HVkdty3<5KvIgtavH2v$tm|M|Ur5m6oM&Dwgf$M|f%qp1dm!zHzi_JjMG2tUqCk;=LZX;i{enmmU|HQ|4W#j7`E}! zza7EN0w&_rNt@37{oR*ru5bQ0c@Q!HMh+uyE%RxRCOIe|0%5Rv0R$-l(1_rJX+&67 zNj4Id?nya$-5MF-?{y>tJyADF-TpSr$Bm3R6bhsl7< z`6j_=0!qDgA}4JPB8$vK$wWg10A^rQx0BZQg80^5Chh515=f4z?Q55}vO>F2^V9Wvz6XKWzylQ=lj1 zCdT0B+Da`)!GWdR^ClFiSyecAmyrRv&HXta+9qy*iUEOBO- z>`ZjmZGGIswmkaB;w^h``_$L={?|9{;_pqkPk!~XE1&UaC%p0*tM7exBa~$z7csCz zGIZ9hKjrBMr?wot{5{=;Tiu+Hz(Qc0(gWf`+xNGrXNzab$gs$e{r>EAH}C%J z*AIUBo7jG9J=NXvJ5Sj3*awHuN$@-xwg4ozXwf3-x&2?+^R2&`n7=H5Vn80~Do9vj zJC$+B7`X^lFL%$Pl#xpS`2Yv*Qsp7S9#e~>zctIgYshzNL2Dc%70#O~M<<4kPaNCb zc;D%5_n%yUcZH2a5CnC_+5mu%fmvJ2a!xDjyW2lVavAxy#&lO1hns9asPV0jWeP@e z9F{cysX8+1h@ARk&UwQQ0rQ3IY z{+l=a^QU&+c>NjAdU9uS0)z-?@EB`jC36B+O|8D)#Cda%7Qc^nQ*d0gMXdU6{)9B#tT29mz*epmP5&{01q$*- zEPm3Nnl%7)ffVxzaS}iPT#BF|E*bzpYRFhhVVndgND=`OfSApIRzht^fKA*`!h~9s zDFBERDN__ssdDrpEo!3X3|F5tz4^Sb>6}$-PnE4TGpJi4xOpJnI3%O2QX8@6R8M(> zlo9;fQ0j4vz-7BaxdTVV%d!1rRx9o8K7RP(VS0H-Umz&QOcXemLXybBkuxUWkvBK^ zo>fz8&pKtbyPE~IuHP(Z$|p|Ubmng#=x*z6|L4HHfr60YxR8y-NO734)Inh>un@C; zmUzH2Ut-{jimH+idQf0Z89=u>$U7cH)@Bsv!elSpWn%T26Wh+-uL!P#ZfRRoZ zoFLkkN)!y{k_D`p-uiPkeeBL}y=8L#Qj$7dC2~%FxEjPHNCS7V_d>$8OW`&$hjtWMAq>__IRy5V|x8*lWR{`XCtjc1fl~3vaNSx`=f|t`$(PkQgykNGDbiPJHcq4 zvg7Z9ysE-1738OtK9(*8E()NZ$1#J=$-s{C5Vmte;1 zPa?RmneNo-TkrcPJ1=>M-~1njUKb0_AXsoYF`^V$Ft?=do~ef#e6Ez>nn_!%$=QhP zrIOa(k#tZ(5QG$j+yMo1!7EjwMFvOkh}c=j=@YtZj-OtC#`;w!RI5(#Fhd1H2MPcL z5=_bY+pqujc{>i5j94(w7P?wCv6d6f7F0crpT_ch&8t0(Dx($U^xJ3dqPq`!co|MU zg%fxjBHl6ty#1+r*bxB>`u*`w@*L>vQ)$i;2 z0W(QXnuefM2peCM9MKBzN3I_=IZFP}B%Ufb$ce(s#J&;DI| z?e)JX6@?U5n`{SrYiom!Y@V%m<^?Nph4U+aiJS$$*3C#gRy>S)Gx%NM3?9hd$~u1e 
z7`6cg1G0c}nfu?yw|pUDM4PpDnoMNmiWLZfiqx#GS-#~LWM$tQhaMi!j#QY=(l{y1 z0kAYoym%TAriXy-76OsTGk_o7kppH>hnNWdZjLCI*`KEe#@AAt2w0uLg zdUL(9r>b`eMuH`gks^!K6pAo0iBKhxBasH82@oNX+sw_Lb41bz!5hdk>?o#Ru|Y7+ zEtk5D78e-56TqTerd(D)l3JWa zN+lR=U_ZwzRdo%k}wNcN*$n=mQ|W|I+4@{wf3^xS@1 zBIT0Nv<*_7rmKVU^eH7{xdMIw=2qE_#2Ft)*5(1TaTylJ3&;ERt3b7oF`U7Tl_Q}L zI-51GA3yZ4y!wwzx-m<2!a3z=YkPl`&%3A25YduMK$im$X!kAwx?4P&UH{{I4}4xQ z7Em@b<_l#egdkA>0wNS%2y682q|{atuZbJt%Gi zi5q%QJ7-xuJ7H@|nA{CP~-hab; z?>)Bi_Vwox1*!)5}Eud|OV5N!3*vz^m+*BHm-~3IIPzZzs+sg@;6&xCf zsYFSJ&y8WBUk)iA04A8ekmPV&vVnYzHL>4D%r~3_8C2EJG{sGw;tGROo@>nC%MA>F z8UwO*%RbufXfc`2>{-|pQ&F0Ha{W91aQfMYCmSaeIT3;?kt$+v03>@M7l8;wNs4F! zgcJ&)U5IM-+`Z?nPXoJvWbiSbPUFIvxJ=(RptK2myB(WRu)Xnn_#fCtM;FyE`tcF~ zBPrORjGX~se)kAMa3Nz}Z8TvY9y{K&-y5JGPZ#Gu=Bee67+D()4n3W?_jR0itkUYr zzMmT{?>q602bQNVY}8GawMk9VQ8YzK`3hM?sJRVERKj_isn_rM31FT9-y@kSY@);# zHx1x5;<~O#8rMdEdx&xs5fwLp5)~K#=8%ZEt$_Rh=a+3oVs{wT{GI^|lh_Q;SS&W( zXxzclHX?*jL}omzXTQDn#3Re6{w=ZEkd#CX4!{69X}tCK=4DMNoycuIS2bd+)q=?ddO7um2r5 zlX&#+axq!8S;ioZh@ikw0BD+37`Qt^7pIgAd%i#lm$~|^*rrPMi7s!N-rlAFJlV^; zeMR_7TS@5;1PLIp5vC`w4H>qyuDsk-)d&#e&opT)R#EwCE{3c9oG%`O+4r#R5g#`I zs*UQ2lZPLjz4+&2U7yv{I*$kuvf~~MKu9UVxTVe%x)i;ZxdG70<=eJHQ9wi!_x;Ru zcYX?36{a{h8OO-R=@|6Bx8?7qRcN`hno{x$lecQwb2{vS^)~G@6qyFcvyHm_egIJS z6Qh$f=y9h8+^!I(Ixl~mzF3#zxRqfKGvCk_ry8O*IiOLdkXt5H`Eyq| z4N(leagTJ5exbbml|M=Z04Kk0w;Y#b7?+dts;lFHk^O4MlAoRT>=VgsM8h2!L(P-Gd5PtZUUm z24KhdTx`D3vc65Z^q|hR3-t(?xgqCXIr7XO<9{Eokt;SiiD+GR>s*tqJXb3SnIS@E zqQ?H8-~EAm6PCzyX|q6=A1Me`!^*`h&K-9S|6Bp)%z1!)2NEJH7>bztpHr z0BC0fi|5bsXMT5T!>^_oqH*|$-!Xn0DqHsx9awzbGO9G5Exqf8+kfNKxAsn7dTb(q zf}#WMI0OPHiN20ka5Kb1czu(M2Q{-DFe4g%ht$Oi>gUvs4#&p_PF9p8viH&UhJ&g` ze3VLX4*;dr_|;*_V{hb~yCdnL?wwD{iFjnty(uY`jie(2@6szNkV`^il|DJZu;o1_y1P;URPi{W?^g~PMUKmIbk^}-!nNqT8WV?$t9>{1XrG;#TY+gEIAi zuI=(_bY%M+82xVVy4jm$z`1cKhRfeT(&Y) zNzxBk{X?~>IY?I2Y);P3{`cCGf4Fk;8O039l$w$uQm_T=HfeDiBH)|j^$1At(yW@9{9Q*=~JU*r* zYzQj~hzzMoUUFQ*@27CN=}-nx;F(LrTOuAX$0O1S8Yy{bQsZ|Voa`QhH?i#%Z1)>V 
zcN0IAc1>Fc;3vL}o5zR-VF*JV7BYbt8jCCbc!YioXuL9dF}5{=xNTXj=PLuV05GMi zBFGeqB6DOX3Z1&P)iPMS-umP(rE&$S6#?~i*%$vcAP@m@jJ|VIqf=fL(evIJZ z-t3?{MhnJ=1|Mldj{S*8B;DIQ#E38J3{V;m!fdZeIZjVI&h(#=aG`aNFvW(%)449j z88F@&OrnPhj?0VUENalje}L!$E(K;;>^&VI)sJ0)!jv|DRjjYyaqF*c?AU+m&=)3h zrs6rsn4$t9iB5EWTY5_}Apz3KJS-Gc)rb^b^}ed|4t2?Y+M=|X?;(`TcS zxSrWI@ihL7*p~;44y;!;diB@0`*Nza{nYRf>43b^K#eB+z1r5|MIEb}-NIrGg#32R z+cL0z+7w$kXs>>>uIv$0xImT&JX(r50&TgJ3dVtqmwQ z^A)6V{qB!nd&|dwr64!cmX>|#aa#;$ZfCpwlXQ@oQ5&HEE{gVRqiLJr^XDh%q3`UG zBF*53>g`2&DJ&eIg&L>KheY)d) z`Q-}nu5EplU;N@E;q7K4$zttd_)Z%-fKai|YS8F2{;gkkT`1{7a8RRu2yEi0 zHoo$4*f36?y~V+i`|HQ~RFZQDe&sRP^($kHSU}Mc+wxq0-Wz70-k;<9*Ag&!LCGQcMx&;H09y&LURpUgxQ$OTM(LPW1T#J zY?31z5)u_!H{)vcNA}$D>7!3QBy0c7R8`ufO^{#`*tx*~2oh2PKtWq6lBp_~XeB^` z45OnO-nZNi^#j1kt=g*{4+&>OMIgtCFE)`qj5f=(s>X3yY4z{P$m6GoopCe#7V&Ws~?6NmqF`tqNRxh`ok z(y6kh5CA3ddsPBSxH(p3M!odDdspA_QJ@m0qA$zZ%`a=Vch*U{`$eOE7P$wsM?B2C zLo$Zp4Zu_zvttpuDO2WTaS*X$5~a78py4K7%61|Hkp7P z8NmYDsStl)NY#UZi0v`N6&R7Ys`2_B3$7l1KL!~T7b*AP=b)!^f;;wn>PrdhOB z5Gl!I`TCz6ePGYAr~Y7a`d~^zQnxVgAyotdAhjGoU{8IN^3WNkB00hWDM$lZBx5CR zD-{W{JAm9GP$E`9LEtthI6n^&A4Y)oi~?EE(k4FrVC996)28_R>KsZYsb~Ld?a+hE zNB*@*np8#7_qL;L#Gx#cmG}S7j+^cVCM1gXqf0&X78nDz6nn7}j~8U0AbXC*0Icc9 z!&>-1v=Ly0oAl46HX1Ub+!=0-PG{)I-3Xwt%yGr1jiBsE@HHDcgU&1g?jfpO7#<}J zFWY4e%yi|Q*WZ5Msb{~KU-^q9TI_qbGZCazE7;|#H0qC{*>c?9< za91Dmf5&KJ7hasVR@H$B!1&eb4@4d>k?2Ey57j_8EyM4@rJr|MVa>S=pS|Fs0#+g? 
zLXg-rjjW$}`L8w(|H&vHg``r!L$cAy>Ycy0a^p{lOl&eaeu(12BG21FhiyZ0pdGK% z0LkIp+)84!ANlW@<3MM&y6?XG<}7#fcVW9u1Tri~;od&&9&fB3F;&9&F1|sFZ_{s$ z>IN8yR2f=Hm87n(eP_Dnht94&Q|pcNVsA>kwRruwFa*HR zOIP$|L7+$6(*1T@&P#{`uuIS|)RrT)Z-WD{*+z%WKq!f>ZR8uIgZxy0b82=k9_x{a zfATyS2wPjgOsO=Vn4Wq5Xo1l|PM6}VB)dQbh+Vf#ZC6WxuLm=B)2=&B4 z+Tcws6tj5Y(&7Qkz5wi-FM#v|x<*x9?2_4Gb^y#R;Ozj59#Z!D{oofaL-6xSaan;~ zC!}el2sDdApt^zCspHRmIluguHKqvyp;8-Nz2?CElRZBHR8+BzOajv%)--nhsWReX z`Z#P1By0>MZ&nuA~@4A0-!-tFuAOL2AD62@$SvC@%MZEn% z369`o<5~xjUQA_I%Q*+9>Ng^`wd!;83oD*`cg~5PB2gR+8g1hzU9J&pj+mlAqCJ03amZ7JSv`Dcl>dYs>;X(AVx zJ$IXavX{5L06LY*-{=pb?EKzg0rzE>OWrI+A0pvW(c~*?a+GT4ybeeVJ&1SWke)oUP ztU32TuRs0y)2E-&ypc8MJk9yL)y~)KF2$_9liqU5F7(}QrRGL2zSJK0>=t`vS}3yN z#J*^|z+ArSJSjTex|HKA&wF#elw&UOKzTsyThNIojg>K;m*3hKNjqj^rWYN<{{cYN V2f|i$C5He2002ovPDHLkV1f -u -p < db/init.sql + +CREATE TABLE IF NOT EXISTS workspaces ( + id INT AUTO_INCREMENT PRIMARY KEY, + team_id VARCHAR(100) UNIQUE NOT NULL, + workspace_name VARCHAR(100), + bot_token VARCHAR(256) NOT NULL, + deleted_at DATETIME DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS instance_keys ( + id INT AUTO_INCREMENT PRIMARY KEY, + public_key TEXT NOT NULL, + private_key_encrypted TEXT NOT NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS federated_workspaces ( + id INT AUTO_INCREMENT PRIMARY KEY, + instance_id VARCHAR(64) NOT NULL UNIQUE, + webhook_url VARCHAR(500) NOT NULL, + public_key TEXT NOT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'active', + name VARCHAR(200) DEFAULT NULL, + primary_team_id VARCHAR(100) DEFAULT NULL, + primary_workspace_name VARCHAR(100) DEFAULT NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS workspace_groups ( + id INT AUTO_INCREMENT PRIMARY KEY, + name 
VARCHAR(100) NOT NULL, + invite_code VARCHAR(20) NOT NULL UNIQUE, + status VARCHAR(20) NOT NULL DEFAULT 'active', + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + created_by_workspace_id INT NOT NULL, + FOREIGN KEY (created_by_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS workspace_group_members ( + id INT AUTO_INCREMENT PRIMARY KEY, + group_id INT NOT NULL, + workspace_id INT DEFAULT NULL, + federated_workspace_id INT DEFAULT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'active', + role VARCHAR(20) NOT NULL DEFAULT 'member', + joined_at DATETIME DEFAULT NULL, + deleted_at DATETIME DEFAULT NULL, + dm_messages TEXT DEFAULT NULL, + FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, + FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, + FOREIGN KEY (federated_workspace_id) REFERENCES federated_workspaces(id) ON DELETE SET NULL, + UNIQUE KEY uq_group_workspace (group_id, workspace_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS syncs ( + id INT AUTO_INCREMENT PRIMARY KEY, + title VARCHAR(100) NOT NULL, + description VARCHAR(100), + group_id INT DEFAULT NULL, + sync_mode VARCHAR(20) NOT NULL DEFAULT 'group', + target_workspace_id INT DEFAULT NULL, + publisher_workspace_id INT DEFAULT NULL, + FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, + FOREIGN KEY (target_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL, + FOREIGN KEY (publisher_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS sync_channels ( + id INT AUTO_INCREMENT PRIMARY KEY, + sync_id INT NOT NULL, + workspace_id INT NOT NULL, + channel_id VARCHAR(100) NOT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'active', + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + deleted_at DATETIME DEFAULT NULL, + FOREIGN KEY (sync_id) 
REFERENCES syncs(id) ON DELETE CASCADE, + FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS post_meta ( + id INT AUTO_INCREMENT PRIMARY KEY, + post_id VARCHAR(100) NOT NULL, + sync_channel_id INT NOT NULL, + ts DECIMAL(16, 6) NOT NULL, + FOREIGN KEY (sync_channel_id) REFERENCES sync_channels(id) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS user_directory ( + id INT AUTO_INCREMENT PRIMARY KEY, + workspace_id INT NOT NULL, + slack_user_id VARCHAR(100) NOT NULL, + email VARCHAR(320) DEFAULT NULL, + real_name VARCHAR(200) DEFAULT NULL, + display_name VARCHAR(200) DEFAULT NULL, + normalized_name VARCHAR(200) DEFAULT NULL, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + deleted_at DATETIME DEFAULT NULL, + FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, + UNIQUE KEY uq_workspace_user (workspace_id, slack_user_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS user_mappings ( + id INT AUTO_INCREMENT PRIMARY KEY, + source_workspace_id INT NOT NULL, + source_user_id VARCHAR(100) NOT NULL, + target_workspace_id INT NOT NULL, + target_user_id VARCHAR(100) DEFAULT NULL, + match_method VARCHAR(20) NOT NULL DEFAULT 'none', + source_display_name VARCHAR(200) DEFAULT NULL, + matched_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + group_id INT DEFAULT NULL, + FOREIGN KEY (source_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, + FOREIGN KEY (target_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, + FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, + UNIQUE KEY uq_source_target (source_workspace_id, source_user_id, target_workspace_id) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +-- Indexes +CREATE INDEX idx_sync_channels_channel_id ON sync_channels(channel_id); +CREATE INDEX idx_sync_channels_sync_id ON 
sync_channels(sync_id); +CREATE INDEX idx_sync_channels_workspace_id ON sync_channels(workspace_id); +CREATE INDEX idx_sync_channels_deleted_at ON sync_channels(deleted_at); +CREATE INDEX idx_post_meta_ts ON post_meta(ts); +CREATE INDEX idx_post_meta_post_id ON post_meta(post_id); +CREATE INDEX idx_workspaces_team_id ON workspaces(team_id); +CREATE INDEX idx_user_dir_email ON user_directory(workspace_id, email); +CREATE INDEX idx_user_dir_normalized ON user_directory(workspace_id, normalized_name); +CREATE INDEX idx_user_mappings_target ON user_mappings(target_workspace_id, match_method); +CREATE INDEX idx_groups_code ON workspace_groups(invite_code, status); +CREATE INDEX idx_group_members_group ON workspace_group_members(group_id, status); +CREATE INDEX idx_group_members_workspace ON workspace_group_members(workspace_id, status); +CREATE INDEX idx_syncs_group ON syncs(group_id); diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..cbc2780 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,43 @@ +services: + db: + image: mysql:8 + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_ROOT_HOST: "%" # allow root from host (e.g. mysql -h 127.0.0.1 -P 3306 -u root -p) + MYSQL_DATABASE: syncbot + ports: + - "3306:3306" + volumes: + - syncbot-db:/var/lib/mysql + - ./db/init.sql:/docker-entrypoint-initdb.d/01-init.sql:ro + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] + interval: 5s + timeout: 5s + retries: 10 + + app: + build: . 
+ ports: + - "3000:3000" + depends_on: + db: + condition: service_healthy + environment: + # Database + DATABASE_HOST: db + ADMIN_DATABASE_USER: root + ADMIN_DATABASE_PASSWORD: rootpass + ADMIN_DATABASE_SCHEMA: syncbot + # Slack — replace with your values or use a .env file + SLACK_BOT_TOKEN: ${SLACK_BOT_TOKEN:-xoxb-your-bot-token} + # Optional + PASSWORD_ENCRYPT_KEY: ${PASSWORD_ENCRYPT_KEY:-123} + REQUIRE_ADMIN: ${REQUIRE_ADMIN:-true} + AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-} + AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-} + volumes: + - ./syncbot:/app/syncbot + +volumes: + syncbot-db: diff --git a/poetry.lock b/poetry.lock index aca34c8..fa8e085 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,43 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. - -[[package]] -name = "appnope" -version = "0.1.3" -description = "Disable App Nap on macOS >= 10.9" -optional = false -python-versions = "*" -files = [ - {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, - {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, -] - -[[package]] -name = "asttokens" -version = "2.4.0" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = "*" -files = [ - {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"}, - {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"}, -] - -[package.dependencies] -six = ">=1.12.0" - -[package.extras] -test = ["astroid", "pytest"] - -[[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" -optional = false -python-versions = "*" -files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = 
"sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, -] +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "boto3" @@ -45,6 +6,7 @@ version = "1.28.60" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.7" +groups = ["dev"] files = [ {file = "boto3-1.28.60-py3-none-any.whl", hash = "sha256:d5f270c2c9a051f78c308cbba4268458e8df441057b73ba140742707ac1bc7ea"}, {file = "boto3-1.28.60.tar.gz", hash = "sha256:dccb49cc10b31314b8553c6c9614c44b2249e0d0285d73f608a5d2010f6e1d82"}, @@ -64,6 +26,7 @@ version = "1.31.60" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">= 3.7" +groups = ["dev"] files = [ {file = "botocore-1.31.60-py3-none-any.whl", hash = "sha256:b6de7a6a03ca3da18b78615a2cb5221c9fdb9483d3f50cb4281ae038b3f22d9f"}, {file = "botocore-1.31.60.tar.gz", hash = "sha256:578470a15a5bd64f67437a81f23feccba85084167acf63c56acada2c1c1d95d8"}, @@ -83,6 +46,7 @@ version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, @@ -90,67 +54,101 @@ files = [ [[package]] name = "cffi" -version = "1.16.0" +version = "2.0.0" description = "Foreign Function Interface for Python calling C code." 
optional = false -python-versions = ">=3.8" -files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = 
"cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +python-versions = ">=3.9" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = 
"sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, ] [package.dependencies] -pycparser = "*" +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} [[package]] name = "charset-normalizer" @@ -158,6 +156,7 @@ version = "3.3.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" +groups = ["main"] files = [ {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, @@ -257,133 +256,93 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "comm" -version = "0.1.4" -description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -optional = false -python-versions = ">=3.6" -files = [ - {file = "comm-0.1.4-py3-none-any.whl", hash = "sha256:6d52794cba11b36ed9860999cd10fd02d6b2eac177068fdd585e1e2f8a96e67a"}, - {file = "comm-0.1.4.tar.gz", hash = "sha256:354e40a59c9dd6db50c5cc6b4acc887d82e9603787f83b68c01a80a923984d15"}, -] - -[package.dependencies] -traitlets = ">=4" - -[package.extras] -lint = ["black (>=22.6.0)", "mdformat (>0.7)", "mdformat-gfm (>=0.3.5)", "ruff (>=0.0.156)"] -test = ["pytest"] -typing = ["mypy (>=0.990)"] - [[package]] name = "cryptography" -version = "41.0.4" +version = "46.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false -python-versions = ">=3.7" -files = [ - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, - {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, - {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, - {file = "cryptography-41.0.4.tar.gz", hash = "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, +python-versions = "!=3.9.0,!=3.9.1,>=3.8" +groups = ["main"] +files = [ + {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"}, + 
{file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"}, + {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"}, + {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"}, + {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"}, + {file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"}, + {file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"}, + {file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"}, + {file = 
"cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"}, + {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"}, + {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"}, + {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"}, + {file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"}, + {file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = 
"sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"}, + {file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"}, + {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"}, + {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"}, + {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"}, + {file = 
"cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"}, + {file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"}, + {file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"}, + {file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"}, ] [package.dependencies] -cffi = ">=1.12" +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} [package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] -sdist = ["build"] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox[uv] (>=2024.4.15)"] +pep8test = ["check-sdist", 
"click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] +sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "debugpy" -version = "1.8.0" -description = "An implementation of the Debug Adapter Protocol for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "debugpy-1.8.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:7fb95ca78f7ac43393cd0e0f2b6deda438ec7c5e47fa5d38553340897d2fbdfb"}, - {file = "debugpy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef9ab7df0b9a42ed9c878afd3eaaff471fce3fa73df96022e1f5c9f8f8c87ada"}, - {file = "debugpy-1.8.0-cp310-cp310-win32.whl", hash = "sha256:a8b7a2fd27cd9f3553ac112f356ad4ca93338feadd8910277aff71ab24d8775f"}, - {file = "debugpy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5d9de202f5d42e62f932507ee8b21e30d49aae7e46d5b1dd5c908db1d7068637"}, - {file = "debugpy-1.8.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:ef54404365fae8d45cf450d0544ee40cefbcb9cb85ea7afe89a963c27028261e"}, - {file = "debugpy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60009b132c91951354f54363f8ebdf7457aeb150e84abba5ae251b8e9f29a8a6"}, - {file = "debugpy-1.8.0-cp311-cp311-win32.whl", hash = "sha256:8cd0197141eb9e8a4566794550cfdcdb8b3db0818bdf8c49a8e8f8053e56e38b"}, - {file = "debugpy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:a64093656c4c64dc6a438e11d59369875d200bd5abb8f9b26c1f5f723622e153"}, - {file = "debugpy-1.8.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:b05a6b503ed520ad58c8dc682749113d2fd9f41ffd45daec16e558ca884008cd"}, - {file = "debugpy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:3c6fb41c98ec51dd010d7ed650accfd07a87fe5e93eca9d5f584d0578f28f35f"}, - {file = "debugpy-1.8.0-cp38-cp38-win32.whl", hash = "sha256:46ab6780159eeabb43c1495d9c84cf85d62975e48b6ec21ee10c95767c0590aa"}, - {file = "debugpy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:bdc5ef99d14b9c0fcb35351b4fbfc06ac0ee576aeab6b2511702e5a648a2e595"}, - {file = "debugpy-1.8.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:61eab4a4c8b6125d41a34bad4e5fe3d2cc145caecd63c3fe953be4cc53e65bf8"}, - {file = "debugpy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:125b9a637e013f9faac0a3d6a82bd17c8b5d2c875fb6b7e2772c5aba6d082332"}, - {file = "debugpy-1.8.0-cp39-cp39-win32.whl", hash = "sha256:57161629133113c97b387382045649a2b985a348f0c9366e22217c87b68b73c6"}, - {file = "debugpy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:e3412f9faa9ade82aa64a50b602544efcba848c91384e9f93497a458767e6926"}, - {file = "debugpy-1.8.0-py2.py3-none-any.whl", hash = "sha256:9c9b0ac1ce2a42888199df1a1906e45e6f3c9555497643a85e0bf2406e3ffbc4"}, - {file = "debugpy-1.8.0.zip", hash = "sha256:12af2c55b419521e33d5fb21bd022df0b5eb267c3e178f1d374a63a2a6bdccd0"}, -] - -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "executing" -version = "2.0.0" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = "*" -files = [ - {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"}, - {file = "executing-2.0.0.tar.gz", hash = 
"sha256:0ff053696fdeef426cda5bd18eacd94f82c91f49823a2e9090124212ceea9b08"}, -] - -[package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] - [[package]] name = "greenlet" version = "3.0.0" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" files = [ {file = "greenlet-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e09dea87cc91aea5500262993cbd484b41edf8af74f976719dd83fe724644cd6"}, {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47932c434a3c8d3c86d865443fadc1fbf574e9b11d6650b656e602b1797908a"}, @@ -459,464 +418,232 @@ version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" +groups = ["main"] files = [ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] [[package]] -name = "ipykernel" -version = "6.25.2" -description = "IPython Kernel for Jupyter" -optional = false -python-versions = ">=3.8" -files = [ - {file = "ipykernel-6.25.2-py3-none-any.whl", hash = "sha256:2e2ee359baba19f10251b99415bb39de1e97d04e1fab385646f24f0596510b77"}, - {file = "ipykernel-6.25.2.tar.gz", hash = "sha256:f468ddd1f17acb48c8ce67fcfa49ba6d46d4f9ac0438c1f441be7c3d1372230b"}, -] - -[package.dependencies] -appnope = {version = "*", markers = "platform_system == \"Darwin\""} -comm = ">=0.1.1" -debugpy = ">=1.6.5" -ipython = ">=7.23.1" -jupyter-client = ">=6.1.12" -jupyter-core = 
">=4.12,<5.0.dev0 || >=5.1.dev0" -matplotlib-inline = ">=0.1" -nest-asyncio = "*" -packaging = "*" -psutil = "*" -pyzmq = ">=20" -tornado = ">=6.1" -traitlets = ">=5.4.0" - -[package.extras] -cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] -pyqt5 = ["pyqt5"] -pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "ipython" -version = "8.16.1" -description = "IPython: Productive Interactive Computing" +name = "iniconfig" +version = "2.3.0" +description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" +groups = ["dev"] files = [ - {file = "ipython-8.16.1-py3-none-any.whl", hash = "sha256:0852469d4d579d9cd613c220af7bf0c9cc251813e12be647cb9d463939db9b1e"}, - {file = "ipython-8.16.1.tar.gz", hash = "sha256:ad52f58fca8f9f848e256c629eff888efc0528c12fe0f8ec14f33205f23ef938"}, + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] -[package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5" - -[package.extras] -all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", 
"numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] -black = ["black"] -doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] -kernel = ["ipykernel"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] -test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] - -[[package]] -name = "jedi" -version = "0.19.1" -description = "An autocompletion tool for Python that can be used for text editors." -optional = false -python-versions = ">=3.6" -files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, -] - -[package.dependencies] -parso = ">=0.8.3,<0.9.0" - -[package.extras] -docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", 
"types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] - [[package]] name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, ] -[[package]] -name = "jupyter-client" -version = "8.3.1" -description = "Jupyter protocol implementation and client libraries" -optional = false -python-versions = ">=3.8" -files = [ - {file = "jupyter_client-8.3.1-py3-none-any.whl", hash = "sha256:5eb9f55eb0650e81de6b7e34308d8b92d04fe4ec41cd8193a913979e33d8e1a5"}, - {file = "jupyter_client-8.3.1.tar.gz", hash = "sha256:60294b2d5b869356c893f57b1a877ea6510d60d45cf4b38057f1672d85699ac9"}, -] - -[package.dependencies] -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -python-dateutil = ">=2.8.2" -pyzmq = ">=23.0" -tornado = ">=6.2" -traitlets = ">=5.3" - -[package.extras] -docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] - -[[package]] -name = "jupyter-core" -version = "5.3.2" -description = "Jupyter core package. A base package on which Jupyter projects rely." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "jupyter_core-5.3.2-py3-none-any.whl", hash = "sha256:a4af53c3fa3f6330cebb0d9f658e148725d15652811d1c32dc0f63bb96f2e6d6"}, - {file = "jupyter_core-5.3.2.tar.gz", hash = "sha256:0c28db6cbe2c37b5b398e1a1a5b22f84fd64cd10afc1f6c05b02fb09481ba45f"}, -] - -[package.dependencies] -platformdirs = ">=2.5" -pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} -traitlets = ">=5.3" - -[package.extras] -docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] -test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "matplotlib-inline" -version = "0.1.6" -description = "Inline Matplotlib backend for Jupyter" -optional = false -python-versions = ">=3.5" -files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, -] - -[package.dependencies] -traitlets = "*" - -[[package]] -name = "nest-asyncio" -version = "1.5.8" -description = "Patch asyncio to allow nested event loops" -optional = false -python-versions = ">=3.5" -files = [ - {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"}, - {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"}, -] - [[package]] name = "packaging" version = "23.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, {file = "packaging-23.2.tar.gz", hash = 
"sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] -[[package]] -name = "parso" -version = "0.8.3" -description = "A Python Parser" -optional = false -python-versions = ">=3.6" -files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, -] - -[package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] - -[[package]] -name = "pexpect" -version = "4.8.0" -description = "Pexpect allows easy control of interactive console applications." -optional = false -python-versions = "*" -files = [ - {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, - {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, -] - -[package.dependencies] -ptyprocess = ">=0.5" - -[[package]] -name = "pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" -optional = false -python-versions = "*" -files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, -] - [[package]] name = "pillow" -version = "10.3.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = 
"pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = 
"pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = 
"pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = "pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = 
"pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, +version = "12.1.1" +description = "Python Imaging Library (fork)" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0"}, + {file = "pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713"}, + {file = "pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b"}, + {file = "pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b"}, + {file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4"}, + {file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4"}, + {file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e"}, + {file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff"}, + {file = "pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40"}, + {file = "pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23"}, + {file = "pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9"}, + {file = "pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = 
"sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32"}, + {file = "pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38"}, + {file = "pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5"}, + {file = "pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090"}, + {file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af"}, + {file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b"}, + {file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5"}, + {file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d"}, + {file = "pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c"}, + {file = "pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563"}, + {file = "pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80"}, + {file = "pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052"}, + {file = "pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984"}, + {file = "pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79"}, + {file = "pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293"}, + {file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397"}, + {file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0"}, + {file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3"}, + {file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35"}, + {file = "pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a"}, + {file = "pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6"}, + {file = "pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523"}, + {file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e"}, + {file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9"}, + {file = "pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6"}, + {file = "pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60"}, + {file = "pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2"}, + {file = "pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850"}, + {file = "pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289"}, + {file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e"}, + {file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717"}, + {file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a"}, + {file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029"}, + {file = "pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b"}, + {file = "pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1"}, + {file = "pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a"}, + {file = "pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da"}, + {file = "pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc"}, + {file = "pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c"}, + {file = 
"pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8"}, + {file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20"}, + {file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13"}, + {file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf"}, + {file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524"}, + {file = "pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986"}, + {file = "pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c"}, + {file = "pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3"}, + {file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af"}, + {file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f"}, + {file = "pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642"}, + {file = "pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd"}, + {file = "pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202"}, + {file = 
"pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f"}, + {file = "pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f"}, + {file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f"}, + {file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e"}, + {file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0"}, + {file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb"}, + {file = "pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f"}, + {file = "pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15"}, + {file = "pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f"}, + {file = "pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8"}, + {file = "pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9"}, + {file = "pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60"}, + {file = "pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7"}, + {file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f"}, + {file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586"}, + {file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce"}, + {file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8"}, + {file = "pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36"}, + {file = "pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b"}, + {file = "pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3"}, + {file = 
"pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735"}, + {file = "pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e"}, + {file = "pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] fpx = ["olefile"] mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] +test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"] +tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] xmp = ["defusedxml"] [[package]] name = "pillow-heif" -version = "0.16.0" +version = "1.2.1" description = "Python interface for libheif library" optional = false -python-versions = ">=3.8" -files = [ - {file = "pillow_heif-0.16.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:c7db96ac172e2654676986e8c35fa32bffdd5b429a8c86b9d628c0333c570d82"}, - {file = "pillow_heif-0.16.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a146be0c8e7bef204eeaa14799b2fca8a4a52ad972850975e23ef10cee4e7de7"}, - {file = "pillow_heif-0.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33e0b1549bcdfec363b3ba6fb55b3de882e1409b5b00f5a68a1a027f051e8ef2"}, - {file = "pillow_heif-0.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fea4410ce02e295079db5b2617579ba016671d334ac1888a1d4b34aedb56b866"}, - {file = "pillow_heif-0.16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:331579ce4f5fa079595c529b06810886ff76f8ade3eb411a1c9c90853a708022"}, - {file = "pillow_heif-0.16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:792e5d88b7d016fe48ae2fd77a852ec8dcf9a7fad1f7f191d35bc173896fe378"}, - {file = "pillow_heif-0.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e0492e4fd6d3334b9eed3651058216ef62f04afa099cfc6b05815c1bf0da2c38"}, - {file = "pillow_heif-0.16.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:beb6576cbe5a9404a8f2ad9ec68f6b0c406e5e9f5d5573722dc3244898dc9866"}, - {file = "pillow_heif-0.16.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:241cf6c510215c6df0ee948dfed06a20c099475250c5c6cac5e7a1ef9e0ec4c3"}, - {file = "pillow_heif-0.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28c980bf8d5239ee87986c9217a5954b07993d71d391949a9feafad0a9c5e9a7"}, - {file = "pillow_heif-0.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8856cf5f0d53f83d814ae5c8d34433e5e5ad9f3e328480257cd6e9fbdb4a458"}, - {file = "pillow_heif-0.16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fba5c46f84031f1186bdea2a0c95f82958f8c29321200e73d7ac5e79ee460c83"}, - {file = "pillow_heif-0.16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5c7f7a94fc2d08ddcf55a6834c4c55b7dea9605656c565ce11c82e3f6e0454a8"}, - {file = "pillow_heif-0.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a2681d4b62418813289987a9420059d724cd93542d0b05e0928fe4578517714"}, - {file = "pillow_heif-0.16.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:7e424d6a34b9466d054706393e76b5abdd84fabdc0c72b19ca10435a76140de7"}, - {file = "pillow_heif-0.16.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:be41b7fadd4a9355d24936f6fad83bb8130fe55ba228ec298ad316392bb6f38b"}, - {file = 
"pillow_heif-0.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:078bc74fd767625e465b2c107228f9c398b9a128bdf81b3f18812d7c07be660f"}, - {file = "pillow_heif-0.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f4293ecbb81d255d8d887dce4708a58e87c86e53c6f1b1affc4c3105e1bcb8c"}, - {file = "pillow_heif-0.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f63a1d8f95811569df5df9b6b11674038929c2f696221f2a393aee5ac1e535b4"}, - {file = "pillow_heif-0.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:89ec30420ddc843c43916febbe31697552ed123396a1696715eea75169866c07"}, - {file = "pillow_heif-0.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:d4595ec975db845d84ab90cbf0678f15b0068b8b83c01d1db7ea524e31bab4b4"}, - {file = "pillow_heif-0.16.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1421d96aebdc9f5773213c8221ce547efb56e37a62da6698312edd4f281efb42"}, - {file = "pillow_heif-0.16.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:88ff22d2b162e7edd9cb9dd98de81455be04c40a99d1d3d3ebe1602b1a21c453"}, - {file = "pillow_heif-0.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb3efbe8efd26203589794988b11ea9bf3dea2d3bcf218e658f779d526dfcf80"}, - {file = "pillow_heif-0.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f062c1be6f04804ffdf0bc452142eff38d7544c8655c04291d16e3b996e4dc4"}, - {file = "pillow_heif-0.16.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:7fabd6534a38078a66ce8b7a5ae8ad37afd9863c930abd3031fb553f1ab4f01a"}, - {file = "pillow_heif-0.16.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d9e465d92cf01093e3e4c33776af97368add23ac1c8d0007f34b8d3e3390d6ad"}, - {file = "pillow_heif-0.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:502cebc90c11a6bffa2ea899088999c25fc99c8f322e047a266e541e3046b27c"}, - {file = "pillow_heif-0.16.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:c2ad68e3e4be40adfc5290bf6daa1569dd7d18501e17779d217ce5cd8c1e338d"}, - {file = "pillow_heif-0.16.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8e168d45b2ce63c1fe2334fd02927699b0097de72605f7571948010fd79e58f0"}, - {file = "pillow_heif-0.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bf10a1686c2d51f4db8ebb78825f96f28d18d1878599e1c64e88cfbdb70a3d2"}, - {file = "pillow_heif-0.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f15dc73ced02a0ccfac93159d12deeaecfbe4335883a1a3309df0f01c26e6e6"}, - {file = "pillow_heif-0.16.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2673048f3cf1498327add70f16e1129be2a09cf4a31cbc02363f5760eb5ba955"}, - {file = "pillow_heif-0.16.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9273af7224e0fb16c18637184a8ea9a8790105658daab04ad541982b8623e5c1"}, - {file = "pillow_heif-0.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:f613dfd05fd62a8b7b57649bfa5db1501be41e18b5e15dd4a2fc12d3e3ddfdaa"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3501f22985cbb427c76febf07a7e309cb828e485c0cf250a625733fc06fc1815"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-macosx_12_0_arm64.whl", hash = "sha256:2b7450303f08ec81d1a63a75052863bb687fc3be1fdd8a34d2c0fef627aacae5"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7794c1a8304eeb841d72cb73aa64cc60c9e5dccb2c7612f8caf528505f78581f"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5edd98192f74e4c7cffdd62953b2987e2b1e0d6a55d5c940306bed71f40206a"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:38fa2854ec7dbe6c875d64cc5b3cf5cc55f1c8a0248dc1c7c34e9d2505521b82"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b50160331754b603524e6ed33c386f478fd66fb345fa6433a507a01c8de642c6"}, - {file 
= "pillow_heif-0.16.0-pp38-pypy38_pp73-macosx_12_0_arm64.whl", hash = "sha256:9fd829c257a763e3a2e8418a773c2808c90799ee3e6b405b5399cb4fdfbe336e"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbd9cc527bbd53c3e7588e16aad170e11cfd180b7e9bd84f18fb020ddec11408"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a27abb523a07b17c118c09f1a00f92cde2295f8e997600024d4b57df3c5ba818"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0075adeb324adb07ddbfbe8a5c79ed12e5d04e60e9a642ff9427e71b5b0adccd"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:40014105688478d6ca146fc04bff6c13f445d01bdea79417b34ee50c1e559190"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-macosx_12_0_arm64.whl", hash = "sha256:7ef47297d526147923f4ecc7ff681a5d5f4e6e3300017681f59968652a0d8afb"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9923dfcc97ae9484d3514f2f6ec368e2ac97cd66f7b95359cc1b0ec0c1cd6157"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17963a73186961fe7792aef01c46e980635f3fcc1836393de39ec9c6776ca51e"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4b6caa5b13b4dfc180507527254014530f6bedbeabc1de2238918bf5b2700c7e"}, - {file = "pillow_heif-0.16.0.tar.gz", hash = "sha256:4d95004bb77aa640f80617716aa21bc092ec06307f6f2ad423deeeda07b4d29c"}, +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "pillow_heif-1.2.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:aef93f67030b953c401058b4735782b412787629054a4979809f721a27e74836"}, + {file = "pillow_heif-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:702f2ebf111fd13fc82c50685f0695f2bee3dd3ebd29305ddc49d6d2478e916b"}, + {file = 
"pillow_heif-1.2.1-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2d03c95b69bb4ca830ff2b58a9c3f7f43c61696a32a688f858fe0a9989d42c53"}, + {file = "pillow_heif-1.2.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:476f10c5785505d2ace0f86eccb2fa614b2c6ae49f636adc36cd48cbecf19e64"}, + {file = "pillow_heif-1.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5a6a72e28a8fa73457fb9c67fe8bb9f27053994e765337d21312ce23eaed3116"}, + {file = "pillow_heif-1.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e04ca9f833b42e2cfa2c72e9f8c6163e988a8b07d62f87e1f33c55f4c683138d"}, + {file = "pillow_heif-1.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:6a21dbdd1183aa44f6519c36557f8dc018d2f86c1ea3091b29008c3d7cb0db2a"}, + {file = "pillow_heif-1.2.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:c534c40708160f38a45bfe5abac1400370079edbb3bc8f23be0d51f556695a16"}, + {file = "pillow_heif-1.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dcc3ab9cbd7df179fe2b51569881732584cdc8cd306461b2cfa8416035137305"}, + {file = "pillow_heif-1.2.1-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba34dfe3ce0584b2f1b7653a075e18e4c97d72110d106b1e7aef5d702dec8045"}, + {file = "pillow_heif-1.2.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cdb956f4b3dc1d1fce2364f539f6b26d604bcb212055d0087c6ff1bd0668599"}, + {file = "pillow_heif-1.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8372ce54b76aff80eb1859d79699e7f972a7837d0e7fb4ce3350d25ac53890bc"}, + {file = "pillow_heif-1.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:110e14113a08aff5047c8c879a8ae3f284b93134e4b9b5b5a7734838030ba9a1"}, + {file = "pillow_heif-1.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2dfa2047dee77a8e8321a949bf9d0c53d03296afb459a5b03201e9d8af6dba36"}, + {file = "pillow_heif-1.2.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = 
"sha256:c5a3c8fec8cf63f6d9170f092a210e76d584beef5a5b0f5e8fbfa171eb27520a"}, + {file = "pillow_heif-1.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:add430cf7f5340eaa70c2e57af59655515fd415b2b93dde0baec87be48debd0f"}, + {file = "pillow_heif-1.2.1-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9a6daa0f88fe5fa76b72c848615836368d0577a108059e3070615c1e50551dc"}, + {file = "pillow_heif-1.2.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:35a355df6024f09b0e46b56bb5805c275a8ca7dc67e1da2be245aee3a70c82ec"}, + {file = "pillow_heif-1.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:33d84eb1c40d9c63d2ea869e6290f5b59ebf4421ed16090796be60b8e3b2a061"}, + {file = "pillow_heif-1.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2522a54df26f996993189326208513a6c8458ac89de51644a89b19fcda712539"}, + {file = "pillow_heif-1.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:0c965277fde806c7c628b16f9a45f4a7b10c32c390ce7d70c0572499a5d8426f"}, + {file = "pillow_heif-1.2.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:246acfade36d877fc7e01ccde03edaafd75e5aad66f889f484fc8ba7b651b688"}, + {file = "pillow_heif-1.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a746d38f2c54774fd680da45f2af56467b15f6b6c46962328ad1ed005d16ca6"}, + {file = "pillow_heif-1.2.1-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a93e374ff86ef61dc374a6d3c22e73fddc609e10b342802fa1674cc26db50859"}, + {file = "pillow_heif-1.2.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f8f0158a0368a38870deda5124d74086f8708268f335ddbdeb0890ef83ecd7ad"}, + {file = "pillow_heif-1.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5132c9c84e18ca800d559b79e389114b289899614c241e4399f8b677f1bbd3d7"}, + {file = "pillow_heif-1.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c31012a51fe3d67ee0c6c91549a5ee0590f3fa07b03882022238d0d0f052ad20"}, + {file = 
"pillow_heif-1.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:e27d7690a08f52c63295f5ca5e13b97bbe168f2f55e32794e3b24898a5270255"}, + {file = "pillow_heif-1.2.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a9083f80271130580e6f99f6b79204fc7f5ff61eefb83ac64c026c68f0000775"}, + {file = "pillow_heif-1.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b3584abf861d33a422a7bda1f2926131cbf4bbd2801390cb7f75f03ef3833a2d"}, + {file = "pillow_heif-1.2.1-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be790ce430645c3e0b148e873ed5ebeeb6d001ae685e8db40f77f43474ab9848"}, + {file = "pillow_heif-1.2.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:957060f8f2ceaa2e1fd41450da05bee87abc054a6247c02b53e9322ce4e53958"}, + {file = "pillow_heif-1.2.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fe231ca4c4e387785a97f2acf38a24474f3a0819b7e2234144cff9fa3de5d3ac"}, + {file = "pillow_heif-1.2.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f780890596161c7f43512377dda9106f793421565a376c70988355de5c4241de"}, + {file = "pillow_heif-1.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:eee8c933cce88dc8f6a01afc3befc159341fbc404a981c3759b3dca97b7f2dbb"}, + {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a1e6ae0b83068874ec8735e7fd066433fda77189facd158d750b820e24686454"}, + {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:b49dbcefa59c54d03cc2cfd98e3fbafa3aa38c3afa1ad719f2a5d6682fbe2752"}, + {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39371c003a9ac657e0e083989b4a25f1eb4ad6a9ea01dd7ea85f93dd00ab9376"}, + {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:084e1d78a6a74efd41d2cb803554500067d509be3ff7f77b61140adeb9867660"}, + {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = 
"sha256:c0fa8c2ceec1fc83b45b8ea036add55706aa2d1f789acfece5f30b124f11fdb3"}, + {file = "pillow_heif-1.2.1.tar.gz", hash = "sha256:29be44d636269e2d779b4aec629bc056ec7260b734a16b4d3bb284c49c200274"}, ] [package.dependencies] -pillow = ">=9.5.0" +pillow = ">=11.1.0" [package.extras] -dev = ["coverage", "defusedxml", "numpy", "opencv-python (==4.9.0.80)", "packaging", "pre-commit", "pylint", "pympler", "pytest"] +dev = ["coverage", "defusedxml", "numpy", "opencv-python (==4.13.0.92)", "packaging", "pre-commit", "pylint", "pympler", "pytest", "setuptools"] docs = ["sphinx (>=4.4)", "sphinx-issues (>=3.0.1)", "sphinx-rtd-theme (>=1.0)"] tests = ["defusedxml", "numpy", "packaging", "pympler", "pytest"] tests-min = ["defusedxml", "packaging", "pytest"] [[package]] -name = "platformdirs" -version = "3.11.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"}, - {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"}, -] - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] - -[[package]] -name = "prompt-toolkit" -version = "3.0.39" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, - {file = "prompt_toolkit-3.0.39.tar.gz", hash = 
"sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, -] - -[package.dependencies] -wcwidth = "*" - -[[package]] -name = "psutil" -version = "5.9.5" -description = "Cross-platform lib for process and system monitoring in Python." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = 
"sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.2" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] -tests = ["pytest"] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] name = "pycparser" @@ -924,6 +651,8 @@ version = "2.21" description = "C parser in Python" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +groups = ["main"] 
+markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, @@ -935,23 +664,25 @@ version = "2.16.1" description = "Pygments is a syntax highlighting package written in Python." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, ] [package.extras] -plugins = ["importlib-metadata"] +plugins = ["importlib-metadata ; python_version < \"3.8\""] [[package]] name = "pymysql" -version = "1.1.0" +version = "1.1.2" description = "Pure Python MySQL Driver" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "PyMySQL-1.1.0-py3-none-any.whl", hash = "sha256:8969ec6d763c856f7073c4c64662882675702efcb114b4bcbb955aea3a069fa7"}, - {file = "PyMySQL-1.1.0.tar.gz", hash = "sha256:4f13a7df8bf36a51e81dd9f3605fede45a4878fe02f9236349fd82a3f0612f96"}, + {file = "pymysql-1.1.2-py3-none-any.whl", hash = "sha256:e6b1d89711dd51f8f74b1631fe08f039e7d76cf67a42a323d3178f0f25762ed9"}, + {file = "pymysql-1.1.2.tar.gz", hash = "sha256:4961d3e165614ae65014e361811a724e2044ad3ea3739de9903ae7c21f539f03"}, ] [package.extras] @@ -959,161 +690,57 @@ ed25519 = ["PyNaCl (>=1.4.0)"] rsa = ["cryptography"] [[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" +name = "pytest" +version = "9.0.2" +description = "pytest: simple powerful testing with Python" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +python-versions = 
">=3.10" +groups = ["dev"] files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b"}, + {file = "pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11"}, ] [package.dependencies] -six = ">=1.5" +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1.0.1" +packaging = ">=22" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = 
"sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, -] +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] [[package]] -name = "pyzmq" -version = "25.1.1" -description = "Python bindings for 0MQ" +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" optional = false -python-versions = ">=3.6" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ - {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:381469297409c5adf9a0e884c5eb5186ed33137badcbbb0560b86e910a2f1e76"}, - {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:955215ed0604dac5b01907424dfa28b40f2b2292d6493445dd34d0dfa72586a8"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:985bbb1316192b98f32e25e7b9958088431d853ac63aca1d2c236f40afb17c83"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:afea96f64efa98df4da6958bae37f1cbea7932c35878b185e5982821bc883369"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76705c9325d72a81155bb6ab48d4312e0032bf045fb0754889133200f7a0d849"}, - {file = 
"pyzmq-25.1.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:77a41c26205d2353a4c94d02be51d6cbdf63c06fbc1295ea57dad7e2d3381b71"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:12720a53e61c3b99d87262294e2b375c915fea93c31fc2336898c26d7aed34cd"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:57459b68e5cd85b0be8184382cefd91959cafe79ae019e6b1ae6e2ba8a12cda7"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:292fe3fc5ad4a75bc8df0dfaee7d0babe8b1f4ceb596437213821f761b4589f9"}, - {file = "pyzmq-25.1.1-cp310-cp310-win32.whl", hash = "sha256:35b5ab8c28978fbbb86ea54958cd89f5176ce747c1fb3d87356cf698048a7790"}, - {file = "pyzmq-25.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:11baebdd5fc5b475d484195e49bae2dc64b94a5208f7c89954e9e354fc609d8f"}, - {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:d20a0ddb3e989e8807d83225a27e5c2eb2260eaa851532086e9e0fa0d5287d83"}, - {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e1c1be77bc5fb77d923850f82e55a928f8638f64a61f00ff18a67c7404faf008"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d89528b4943d27029a2818f847c10c2cecc79fa9590f3cb1860459a5be7933eb"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90f26dc6d5f241ba358bef79be9ce06de58d477ca8485e3291675436d3827cf8"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2b92812bd214018e50b6380ea3ac0c8bb01ac07fcc14c5f86a5bb25e74026e9"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f957ce63d13c28730f7fd6b72333814221c84ca2421298f66e5143f81c9f91f"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:047a640f5c9c6ade7b1cc6680a0e28c9dd5a0825135acbd3569cc96ea00b2505"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_i686.whl", hash = 
"sha256:7f7e58effd14b641c5e4dec8c7dab02fb67a13df90329e61c869b9cc607ef752"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c2910967e6ab16bf6fbeb1f771c89a7050947221ae12a5b0b60f3bca2ee19bca"}, - {file = "pyzmq-25.1.1-cp311-cp311-win32.whl", hash = "sha256:76c1c8efb3ca3a1818b837aea423ff8a07bbf7aafe9f2f6582b61a0458b1a329"}, - {file = "pyzmq-25.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:44e58a0554b21fc662f2712814a746635ed668d0fbc98b7cb9d74cb798d202e6"}, - {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:e1ffa1c924e8c72778b9ccd386a7067cddf626884fd8277f503c48bb5f51c762"}, - {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1af379b33ef33757224da93e9da62e6471cf4a66d10078cf32bae8127d3d0d4a"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cff084c6933680d1f8b2f3b4ff5bbb88538a4aac00d199ac13f49d0698727ecb"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2400a94f7dd9cb20cd012951a0cbf8249e3d554c63a9c0cdfd5cbb6c01d2dec"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d81f1ddae3858b8299d1da72dd7d19dd36aab654c19671aa8a7e7fb02f6638a"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:255ca2b219f9e5a3a9ef3081512e1358bd4760ce77828e1028b818ff5610b87b"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a882ac0a351288dd18ecae3326b8a49d10c61a68b01419f3a0b9a306190baf69"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:724c292bb26365659fc434e9567b3f1adbdb5e8d640c936ed901f49e03e5d32e"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ca1ed0bb2d850aa8471387882247c68f1e62a4af0ce9c8a1dbe0d2bf69e41fb"}, - {file = "pyzmq-25.1.1-cp312-cp312-win32.whl", hash = "sha256:b3451108ab861040754fa5208bca4a5496c65875710f76789a9ad27c801a0075"}, - 
{file = "pyzmq-25.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:eadbefd5e92ef8a345f0525b5cfd01cf4e4cc651a2cffb8f23c0dd184975d787"}, - {file = "pyzmq-25.1.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:db0b2af416ba735c6304c47f75d348f498b92952f5e3e8bff449336d2728795d"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c133e93b405eb0d36fa430c94185bdd13c36204a8635470cccc200723c13bb"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:273bc3959bcbff3f48606b28229b4721716598d76b5aaea2b4a9d0ab454ec062"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cbc8df5c6a88ba5ae385d8930da02201165408dde8d8322072e3e5ddd4f68e22"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:18d43df3f2302d836f2a56f17e5663e398416e9dd74b205b179065e61f1a6edf"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:73461eed88a88c866656e08f89299720a38cb4e9d34ae6bf5df6f71102570f2e"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:34c850ce7976d19ebe7b9d4b9bb8c9dfc7aac336c0958e2651b88cbd46682123"}, - {file = "pyzmq-25.1.1-cp36-cp36m-win32.whl", hash = "sha256:d2045d6d9439a0078f2a34b57c7b18c4a6aef0bee37f22e4ec9f32456c852c71"}, - {file = "pyzmq-25.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:458dea649f2f02a0b244ae6aef8dc29325a2810aa26b07af8374dc2a9faf57e3"}, - {file = "pyzmq-25.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cff25c5b315e63b07a36f0c2bab32c58eafbe57d0dce61b614ef4c76058c115"}, - {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1579413ae492b05de5a6174574f8c44c2b9b122a42015c5292afa4be2507f28"}, - {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3d0a409d3b28607cc427aa5c30a6f1e4452cc44e311f843e05edb28ab5e36da0"}, - {file = 
"pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21eb4e609a154a57c520e3d5bfa0d97e49b6872ea057b7c85257b11e78068222"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:034239843541ef7a1aee0c7b2cb7f6aafffb005ede965ae9cbd49d5ff4ff73cf"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f8115e303280ba09f3898194791a153862cbf9eef722ad8f7f741987ee2a97c7"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1a5d26fe8f32f137e784f768143728438877d69a586ddeaad898558dc971a5ae"}, - {file = "pyzmq-25.1.1-cp37-cp37m-win32.whl", hash = "sha256:f32260e556a983bc5c7ed588d04c942c9a8f9c2e99213fec11a031e316874c7e"}, - {file = "pyzmq-25.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf34e43c531bbb510ae7e8f5b2b1f2a8ab93219510e2b287a944432fad135f3"}, - {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:87e34f31ca8f168c56d6fbf99692cc8d3b445abb5bfd08c229ae992d7547a92a"}, - {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c9c6c9b2c2f80747a98f34ef491c4d7b1a8d4853937bb1492774992a120f475d"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5619f3f5a4db5dbb572b095ea3cb5cc035335159d9da950830c9c4db2fbb6995"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5a34d2395073ef862b4032343cf0c32a712f3ab49d7ec4f42c9661e0294d106f"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0e6b78220aba09815cd1f3a32b9c7cb3e02cb846d1cfc526b6595f6046618"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3669cf8ee3520c2f13b2e0351c41fea919852b220988d2049249db10046a7afb"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2d163a18819277e49911f7461567bda923461c50b19d169a062536fffe7cd9d2"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:df27ffddff4190667d40de7beba4a950b5ce78fe28a7dcc41d6f8a700a80a3c0"}, - {file = "pyzmq-25.1.1-cp38-cp38-win32.whl", hash = "sha256:a382372898a07479bd34bda781008e4a954ed8750f17891e794521c3e21c2e1c"}, - {file = "pyzmq-25.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:52533489f28d62eb1258a965f2aba28a82aa747202c8fa5a1c7a43b5db0e85c1"}, - {file = "pyzmq-25.1.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:03b3f49b57264909aacd0741892f2aecf2f51fb053e7d8ac6767f6c700832f45"}, - {file = "pyzmq-25.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:330f9e188d0d89080cde66dc7470f57d1926ff2fb5576227f14d5be7ab30b9fa"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2ca57a5be0389f2a65e6d3bb2962a971688cbdd30b4c0bd188c99e39c234f414"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d457aed310f2670f59cc5b57dcfced452aeeed77f9da2b9763616bd57e4dbaae"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c56d748ea50215abef7030c72b60dd723ed5b5c7e65e7bc2504e77843631c1a6"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8f03d3f0d01cb5a018debeb412441996a517b11c5c17ab2001aa0597c6d6882c"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:820c4a08195a681252f46926de10e29b6bbf3e17b30037bd4250d72dd3ddaab8"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17ef5f01d25b67ca8f98120d5fa1d21efe9611604e8eb03a5147360f517dd1e2"}, - {file = "pyzmq-25.1.1-cp39-cp39-win32.whl", hash = "sha256:04ccbed567171579ec2cebb9c8a3e30801723c575601f9a990ab25bcac6b51e2"}, - {file = "pyzmq-25.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:e61f091c3ba0c3578411ef505992d356a812fb200643eab27f4f70eed34a29ef"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ade6d25bb29c4555d718ac6d1443a7386595528c33d6b133b258f65f963bb0f6"}, - {file = 
"pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0c95ddd4f6e9fca4e9e3afaa4f9df8552f0ba5d1004e89ef0a68e1f1f9807c7"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48e466162a24daf86f6b5ca72444d2bf39a5e58da5f96370078be67c67adc978"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abc719161780932c4e11aaebb203be3d6acc6b38d2f26c0f523b5b59d2fc1996"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ccf825981640b8c34ae54231b7ed00271822ea1c6d8ba1090ebd4943759abf5"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c2f20ce161ebdb0091a10c9ca0372e023ce24980d0e1f810f519da6f79c60800"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:deee9ca4727f53464daf089536e68b13e6104e84a37820a88b0a057b97bba2d2"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aa8d6cdc8b8aa19ceb319aaa2b660cdaccc533ec477eeb1309e2a291eaacc43a"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019e59ef5c5256a2c7378f2fb8560fc2a9ff1d315755204295b2eab96b254d0a"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b9af3757495c1ee3b5c4e945c1df7be95562277c6e5bccc20a39aec50f826cd0"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:548d6482dc8aadbe7e79d1b5806585c8120bafa1ef841167bc9090522b610fa6"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:057e824b2aae50accc0f9a0570998adc021b372478a921506fddd6c02e60308e"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2243700cc5548cff20963f0ca92d3e5e436394375ab8a354bbea2b12911b20b0"}, - {file = 
"pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79986f3b4af059777111409ee517da24a529bdbd46da578b33f25580adcff728"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:11d58723d44d6ed4dd677c5615b2ffb19d5c426636345567d6af82be4dff8a55"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:49d238cf4b69652257db66d0c623cd3e09b5d2e9576b56bc067a396133a00d4a"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fedbdc753827cf014c01dbbee9c3be17e5a208dcd1bf8641ce2cd29580d1f0d4"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc16ac425cc927d0a57d242589f87ee093884ea4804c05a13834d07c20db203c"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11c1d2aed9079c6b0c9550a7257a836b4a637feb334904610f06d70eb44c56d2"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e8a701123029cc240cea61dd2d16ad57cab4691804143ce80ecd9286b464d180"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:61706a6b6c24bdece85ff177fec393545a3191eeda35b07aaa1458a027ad1304"}, - {file = "pyzmq-25.1.1.tar.gz", hash = "sha256:259c22485b71abacdfa8bf79720cd7bcf4b9d128b30ea554f01ae71fdbfdaa23"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] -cffi = {version = "*", markers = "implementation_name == \"pypy\""} +six = ">=1.5" [[package]] name = "requests" -version = "2.31.0" +version = "2.32.5" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -1127,6 +754,7 @@ version = "0.7.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">= 3.7" +groups = ["dev"] files = [ {file = "s3transfer-0.7.0-py3-none-any.whl", hash = "sha256:10d6923c6359175f264811ef4bf6161a3156ce8e350e705396a7557d6293c33a"}, {file = "s3transfer-0.7.0.tar.gz", hash = "sha256:fd3889a66f5fe17299fe75b82eae6cf722554edca744ca5d5fe308b104883d2e"}, @@ -1144,6 +772,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["dev"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -1151,39 +780,33 @@ files = [ [[package]] name = "slack-bolt" -version = "1.18.0" +version = "1.27.0" description = "The Bolt Framework for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "slack_bolt-1.18.0-py2.py3-none-any.whl", hash = "sha256:63089a401ae3900c37698890249acd008a4651d06e86194edc7b72a00819bbac"}, - {file = 
"slack_bolt-1.18.0.tar.gz", hash = "sha256:43b121acf78440303ce5129e53be36bdfe5d926a193daef7daf2860688e65dd3"}, + {file = "slack_bolt-1.27.0-py2.py3-none-any.whl", hash = "sha256:c43c94bf34740f2adeb9b55566c83f1e73fed6ba2878bd346cdfd6fd8ad22360"}, + {file = "slack_bolt-1.27.0.tar.gz", hash = "sha256:3db91d64e277e176a565c574ae82748aa8554f19e41a4fceadca4d65374ce1e0"}, ] [package.dependencies] -slack-sdk = ">=3.21.2,<4" - -[package.extras] -adapter = ["CherryPy (>=18,<19)", "Django (>=3,<5)", "Flask (>=1,<3)", "Werkzeug (>=2,<3)", "boto3 (<=2)", "bottle (>=0.12,<1)", "chalice (>=1.28,<2)", "falcon (>=2,<4)", "fastapi (>=0.70.0,<1)", "gunicorn (>=20,<21)", "pyramid (>=1,<3)", "sanic (>=22,<23)", "starlette (>=0.14,<1)", "tornado (>=6,<7)", "uvicorn (<1)", "websocket-client (>=1.2.3,<2)"] -adapter-testing = ["Flask (>=1,<2)", "Werkzeug (>=1,<2)", "boddle (>=0.2,<0.3)", "docker (>=5,<6)", "moto (>=3,<4)", "requests (>=2,<3)", "sanic-testing (>=0.7)"] -async = ["aiohttp (>=3,<4)", "websockets (>=10,<11)"] -testing = ["Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (>=1,<2)", "aiohttp (>=3,<4)", "black (==22.8.0)", "click (<=8.0.4)", "itsdangerous (==2.0.1)", "pytest (>=6.2.5,<7)", "pytest-asyncio (>=0.18.2,<1)", "pytest-cov (>=3,<4)"] -testing-without-asyncio = ["Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (>=1,<2)", "black (==22.8.0)", "click (<=8.0.4)", "itsdangerous (==2.0.1)", "pytest (>=6.2.5,<7)", "pytest-cov (>=3,<4)"] +slack_sdk = ">=3.38.0,<4" [[package]] name = "slack-sdk" -version = "3.23.0" +version = "3.40.1" description = "The Slack API Platform SDK for Python" optional = false -python-versions = ">=3.6.0" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "slack_sdk-3.23.0-py2.py3-none-any.whl", hash = "sha256:2a8513505cced20ceee22b5b49c11d9545caa6234b56bf0ad47133ea5b357d10"}, - {file = "slack_sdk-3.23.0.tar.gz", hash = "sha256:9d6ebc4ff74e7983e1b27dbdb0f2bb6fc3c2a2451694686eaa2be23bbb085a73"}, + {file = 
"slack_sdk-3.40.1-py2.py3-none-any.whl", hash = "sha256:cd8902252979aa248092b0d77f3a9ea3cc605bc5d53663ad728e892e26e14a65"}, + {file = "slack_sdk-3.40.1.tar.gz", hash = "sha256:a215333bc251bc90abf5f5110899497bf61a3b5184b6d9ee35d73ebf09ec3fd0"}, ] [package.extras] -optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=10,<11)"] -testing = ["Flask (>=1,<2)", "Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (<2)", "black (==22.8.0)", "boto3 (<=2)", "click (==8.0.4)", "flake8 (>=5,<6)", "itsdangerous (==1.1.0)", "moto (>=3,<4)", "psutil (>=5,<6)", "pytest (>=6.2.5,<7)", "pytest-asyncio (<1)", "pytest-cov (>=2,<3)"] +optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=9.1,<16)"] [[package]] name = "sqlalchemy" @@ -1191,11 +814,13 @@ version = "1.4.49" description = "Database Abstraction Library" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["main"] files = [ {file = "SQLAlchemy-1.4.49-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e126cf98b7fd38f1e33c64484406b78e937b1a280e078ef558b95bf5b6895f6"}, {file = "SQLAlchemy-1.4.49-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03db81b89fe7ef3857b4a00b63dedd632d6183d4ea5a31c5d8a92e000a41fc71"}, {file = "SQLAlchemy-1.4.49-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:95b9df9afd680b7a3b13b38adf6e3a38995da5e162cc7524ef08e3be4e5ed3e1"}, {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a63e43bf3f668c11bb0444ce6e809c1227b8f067ca1068898f3008a273f52b09"}, + {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ca46de16650d143a928d10842939dab208e8d8c3a9a8757600cae9b7c579c5cd"}, {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f835c050ebaa4e48b18403bed2c0fda986525896efd76c245bdd4db995e51a4c"}, {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c21b172dfb22e0db303ff6419451f0cac891d2e911bb9fbf8003d717f1bcf91"}, {file = "SQLAlchemy-1.4.49-cp310-cp310-win32.whl", hash = "sha256:5fb1ebdfc8373b5a291485757bd6431de8d7ed42c27439f543c81f6c8febd729"}, @@ -1205,26 +830,35 @@ files = [ {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5debe7d49b8acf1f3035317e63d9ec8d5e4d904c6e75a2a9246a119f5f2fdf3d"}, {file = "SQLAlchemy-1.4.49-cp311-cp311-win32.whl", hash = "sha256:82b08e82da3756765c2e75f327b9bf6b0f043c9c3925fb95fb51e1567fa4ee87"}, {file = "SQLAlchemy-1.4.49-cp311-cp311-win_amd64.whl", hash = "sha256:171e04eeb5d1c0d96a544caf982621a1711d078dbc5c96f11d6469169bd003f1"}, + {file = "SQLAlchemy-1.4.49-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f23755c384c2969ca2f7667a83f7c5648fcf8b62a3f2bbd883d805454964a800"}, + {file = "SQLAlchemy-1.4.49-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8396e896e08e37032e87e7fbf4a15f431aa878c286dc7f79e616c2feacdb366c"}, + {file = "SQLAlchemy-1.4.49-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66da9627cfcc43bbdebd47bfe0145bb662041472393c03b7802253993b6b7c90"}, + {file = "SQLAlchemy-1.4.49-cp312-cp312-win32.whl", hash = "sha256:9a06e046ffeb8a484279e54bda0a5abfd9675f594a2e38ef3133d7e4d75b6214"}, + {file = "SQLAlchemy-1.4.49-cp312-cp312-win_amd64.whl", hash = "sha256:7cf8b90ad84ad3a45098b1c9f56f2b161601e4670827d6b892ea0e884569bd1d"}, {file = 
"SQLAlchemy-1.4.49-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:36e58f8c4fe43984384e3fbe6341ac99b6b4e083de2fe838f0fdb91cebe9e9cb"}, {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b31e67ff419013f99ad6f8fc73ee19ea31585e1e9fe773744c0f3ce58c039c30"}, + {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc22807a7e161c0d8f3da34018ab7c97ef6223578fcdd99b1d3e7ed1100a5db"}, {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c14b29d9e1529f99efd550cd04dbb6db6ba5d690abb96d52de2bff4ed518bc95"}, {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c40f3470e084d31247aea228aa1c39bbc0904c2b9ccbf5d3cfa2ea2dac06f26d"}, {file = "SQLAlchemy-1.4.49-cp36-cp36m-win32.whl", hash = "sha256:706bfa02157b97c136547c406f263e4c6274a7b061b3eb9742915dd774bbc264"}, {file = "SQLAlchemy-1.4.49-cp36-cp36m-win_amd64.whl", hash = "sha256:a7f7b5c07ae5c0cfd24c2db86071fb2a3d947da7bd487e359cc91e67ac1c6d2e"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:4afbbf5ef41ac18e02c8dc1f86c04b22b7a2125f2a030e25bbb4aff31abb224b"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24e300c0c2147484a002b175f4e1361f102e82c345bf263242f0449672a4bccf"}, + {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:393cd06c3b00b57f5421e2133e088df9cabcececcea180327e43b937b5a7caa5"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:201de072b818f8ad55c80d18d1a788729cccf9be6d9dc3b9d8613b053cd4836d"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653ed6817c710d0c95558232aba799307d14ae084cc9b1f4c389157ec50df5c"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-win32.whl", hash = "sha256:647e0b309cb4512b1f1b78471fdaf72921b6fa6e750b9f891e09c6e2f0e5326f"}, {file = "SQLAlchemy-1.4.49-cp37-cp37m-win_amd64.whl", hash = "sha256:ab73ed1a05ff539afc4a7f8cf371764cdf79768ecb7d2ec691e3ff89abbc541e"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37ce517c011560d68f1ffb28af65d7e06f873f191eb3a73af5671e9c3fada08a"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1878ce508edea4a879015ab5215546c444233881301e97ca16fe251e89f1c55"}, + {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ab792ca493891d7a45a077e35b418f68435efb3e1706cb8155e20e86a9013c"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e8e608983e6f85d0852ca61f97e521b62e67969e6e640fe6c6b575d4db68557"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccf956da45290df6e809ea12c54c02ace7f8ff4d765d6d3dfb3655ee876ce58d"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-win32.whl", hash = "sha256:f167c8175ab908ce48bd6550679cc6ea20ae169379e73c7720a28f89e53aa532"}, {file = "SQLAlchemy-1.4.49-cp38-cp38-win_amd64.whl", hash = "sha256:45806315aae81a0c202752558f0df52b42d11dd7ba0097bf71e253b4215f34f4"}, {file = "SQLAlchemy-1.4.49-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b6d0c4b15d65087738a6e22e0ff461b407533ff65a73b818089efc8eb2b3e1de"}, {file = 
"SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a843e34abfd4c797018fd8d00ffffa99fd5184c421f190b6ca99def4087689bd"}, + {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:738d7321212941ab19ba2acf02a68b8ee64987b248ffa2101630e8fccb549e0d"}, {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c890421651b45a681181301b3497e4d57c0d01dc001e10438a40e9a9c25ee77"}, {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d26f280b8f0a8f497bc10573849ad6dc62e671d2468826e5c748d04ed9e670d5"}, {file = "SQLAlchemy-1.4.49-cp39-cp39-win32.whl", hash = "sha256:ec2268de67f73b43320383947e74700e95c6770d0c68c4e615e9897e46296294"}, @@ -1236,79 +870,25 @@ files = [ greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} [package.extras] -aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] +aiomysql = ["aiomysql ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] +aiosqlite = ["aiosqlite ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17) ; python_version >= \"3\""] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; 
python_version >= \"3\""] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\""] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] -mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] +mypy = ["mypy (>=0.910) ; python_version >= \"3\"", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0) ; python_version >= \"3\"", "mysqlclient (>=1.4.0,<2) ; python_version < \"3\""] mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"] +oracle = ["cx-oracle (>=7) ; python_version >= \"3\"", "cx-oracle (>=7,<8) ; python_version < \"3\""] postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-asyncpg = ["asyncpg ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql", "pymysql (<1)"] -sqlcipher = ["sqlcipher3-binary"] - -[[package]] -name = "stack-data" -version = "0.6.3" -description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false -python-versions = "*" -files = [ - {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, - {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] - -[[package]] -name = "tornado" -version = "6.3.3" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-optional = false -python-versions = ">= 3.8" -files = [ - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d"}, - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17"}, - {file = "tornado-6.3.3-cp38-abi3-win32.whl", hash = "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3"}, - {file = "tornado-6.3.3-cp38-abi3-win_amd64.whl", hash = "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5"}, - {file = "tornado-6.3.3.tar.gz", hash = "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"}, -] - -[[package]] -name = "traitlets" -version = "5.11.2" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.8" -files = [ - {file = "traitlets-5.11.2-py3-none-any.whl", hash = 
"sha256:98277f247f18b2c5cabaf4af369187754f4fb0e85911d473f72329db8a7f4fae"}, - {file = "traitlets-5.11.2.tar.gz", hash = "sha256:7564b5bf8d38c40fa45498072bf4dc5e8346eb087bbf1e2ae2d8774f6a0f078e"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.5.1)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] +pymysql = ["pymysql (<1) ; python_version < \"3\"", "pymysql ; python_version >= \"3\""] +sqlcipher = ["sqlcipher3-binary ; python_version >= \"3\""] [[package]] name = "urllib3" @@ -1316,28 +896,18 @@ version = "1.26.17" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +groups = ["main", "dev"] files = [ {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, ] [package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] -[[package]] -name = "wcwidth" -version = "0.2.8" 
-description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -files = [ - {file = "wcwidth-0.2.8-py2.py3-none-any.whl", hash = "sha256:77f719e01648ed600dfa5402c347481c0992263b81a027344f3e1ba25493a704"}, - {file = "wcwidth-0.2.8.tar.gz", hash = "sha256:8705c569999ffbb4f6a87c6d1b80f324bd6db952f5eb0b95bc07517f4c1813d4"}, -] - [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.11" -content-hash = "0dd2e974c02e443a64a6ff94746abb6fc011eb81d6a62dfa22b03d7bff4f9cd1" +content-hash = "9f953e1dbbc8bc13aa7597380931c869f83d75db1bbaa0700527d77a0b0cde84" diff --git a/pyproject.toml b/pyproject.toml index b7a407a..575a87c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,22 +2,60 @@ name = "syncbot" version = "0.1.0" description = "" -authors = ["Evan Petzoldt "] +authors = ["Klint Van Tassel ", "Evan Petzoldt "] readme = "README.md" [tool.poetry.dependencies] python = "^3.11" -slack-bolt = "^1.18.0" +slack-bolt = "^1.27.0" sqlalchemy = "<2.0" -pymysql = "^1.1.0" -cryptography = "^41.0.4" -requests = "^2.31.0" -pillow-heif = "^0.16.0" +pymysql = "^1.1.2" +cryptography = "^46.0.0" +requests = "^2.32.0" +pillow = "^12.0.0" +pillow-heif = "^1.2.0" [tool.poetry.group.dev.dependencies] -ipykernel = "^6.25.2" boto3 = "^1.28.57" +pytest = "^9.0" + +[tool.pytest.ini_options] +testpaths = ["tests"] +pythonpath = ["syncbot"] + +[tool.ruff] +target-version = "py311" +line-length = 120 +src = ["syncbot", "tests"] + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "UP", # pyupgrade + "B", # flake8-bugbear + "SIM", # flake8-simplify + "LOG", # flake8-logging +] +ignore = [ + "E501", # line-too-long (handled by formatter) + "B905", # zip-without-explicit-strict + "SIM108", # ternary operator (readability preference) +] + +[tool.ruff.lint.per-file-ignores] +"syncbot/app.py" = ["E402"] # load_dotenv() must run before app imports 
+"syncbot/builders/user_mapping.py" = ["I001"] # import block sort (slack.blocks shorthand used) + +[tool.ruff.lint.isort] +known-first-party = ["builders", "constants", "db", "federation", "handlers", "helpers", "logger", "routing", "slack"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" [build-system] requires = ["poetry-core"] diff --git a/samconfig.toml b/samconfig.toml new file mode 100644 index 0000000..480a2ed --- /dev/null +++ b/samconfig.toml @@ -0,0 +1,28 @@ +# SAM CLI configuration for local deployment +# Usage: +# sam build --use-container +# sam deploy # uses [default.deploy.parameters] +# sam deploy --config-env prod # uses [prod.deploy.parameters] + +version = 0.1 + +[default.build.parameters] +use_container = true + +[default.deploy.parameters] +stack_name = "syncbot-staging" +resolve_s3 = true +s3_prefix = "syncbot-staging" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=staging" + +[prod.deploy.parameters] +stack_name = "syncbot-prod" +resolve_s3 = true +s3_prefix = "syncbot-prod" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=prod" diff --git a/slack-manifest.yaml b/slack-manifest.yaml new file mode 100644 index 0000000..69dd12c --- /dev/null +++ b/slack-manifest.yaml @@ -0,0 +1,61 @@ +_metadata: + major_version: 1 + minor_version: 1 + +display_information: + name: SyncBot + description: Sync chat threads between Slack Workspaces. 
+ background_color: "#1A1D21" + +features: + app_home: + home_tab_enabled: true + messages_tab_enabled: true + messages_tab_read_only_enabled: true + bot_user: + display_name: SyncBot + always_online: true + +oauth_config: + redirect_urls: + - https://your-random-tunnel-name.trycloudflare.com/slack/oauth_redirect + scopes: + bot: + - app_mentions:read + - channels:history + - channels:join + - channels:read + - channels:manage + - chat:write + - chat:write.customize + - files:read + - files:write + - groups:history + - groups:read + - groups:write + - im:write + - reactions:read + - reactions:write + - team:read + - users:read + - users:read.email + +settings: + event_subscriptions: + request_url: https://your-random-tunnel-name.trycloudflare.com/slack/events + bot_events: + - app_home_opened + - member_joined_channel + - message.channels + - message.groups + - reaction_added + - reaction_removed + - team_join + - tokens_revoked + - user_profile_changed + interactivity: + is_enabled: true + request_url: https://your-random-tunnel-name.trycloudflare.com/slack/events + org_deploy_enabled: false + socket_mode_enabled: false + token_rotation_enabled: false diff --git a/syncbot/app.py b/syncbot/app.py index 1d34aa5..69007be 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -1,41 +1,190 @@ +"""SyncBot — Slack app that syncs messages across workspaces. + +This module is the entry point for both AWS Lambda (via :func:`handler`) and +local development (``python app.py`` starts a Bolt dev server on port 3000). + +All incoming Slack events, actions, view submissions, and slash commands are +funnelled through :func:`main_response`, which looks up the appropriate +handler in :data:`~utils.routing.MAIN_MAPPER` and dispatches the request. + +Federation API endpoints (``/api/federation/*``) handle cross-instance +communication and are dispatched separately from Slack events. 
+""" + import json import logging +import os import re -import sys + +from dotenv import load_dotenv + +# Load .env before any other app imports so env vars are available everywhere. +# In production (Lambda) there is no .env file and this is a harmless no-op. +load_dotenv(os.path.join(os.path.dirname(__file__), "..", ".env")) from slack_bolt import App from slack_bolt.adapter.aws_lambda import SlackRequestHandler -from utils.constants import LOCAL_DEVELOPMENT -from utils.helpers import get_oauth_flow, get_request_type, safe_get -from utils.routing import MAIN_MAPPER + +from constants import ( + DANGER_DROP_AND_INIT_DB, + FEDERATION_ENABLED, + HAS_REAL_BOT_TOKEN, + LOCAL_DEVELOPMENT, + validate_config, +) +from federation.api import dispatch_federation_request +from helpers import get_oauth_flow, get_request_type, safe_get +from logger import ( + configure_logging, + emit_metric, + get_request_duration_ms, + set_correlation_id, +) +from routing import MAIN_MAPPER +from slack.actions import CONFIG_PUBLISH_CHANNEL_SUBMIT, CONFIG_PUBLISH_MODE_SUBMIT + +_DEFERRED_ACK_VIEWS = frozenset({CONFIG_PUBLISH_MODE_SUBMIT, CONFIG_PUBLISH_CHANNEL_SUBMIT}) +"""view_submission callback_ids whose handlers control their own ack response.""" + +_SENSITIVE_KEYS = frozenset({ + "token", "bot_token", "access_token", "shared_secret", + "public_key", "private_key", "private_key_encrypted", +}) + + +def _redact_sensitive(obj, _depth=0): + """Return a copy of *obj* with sensitive keys replaced by ``"[REDACTED]"``.""" + if _depth > 10: + return obj + if isinstance(obj, dict): + return { + k: "[REDACTED]" if k in _SENSITIVE_KEYS else _redact_sensitive(v, _depth + 1) + for k, v in obj.items() + } + if isinstance(obj, list): + return [_redact_sensitive(v, _depth + 1) for v in obj] + return obj + SlackRequestHandler.clear_all_log_handlers() -if logging.getLogger().hasHandlers(): - logging.getLogger().setLevel(logging.INFO) -else: - logging.basicConfig(level=logging.INFO, stream=sys.stdout) 
+configure_logging() + +if os.environ.get(DANGER_DROP_AND_INIT_DB, "").strip().lower() == "true": + from db import drop_and_init_db + drop_and_init_db() + +validate_config() app = App( process_before_response=not LOCAL_DEVELOPMENT, + token_verification_enabled=not LOCAL_DEVELOPMENT or HAS_REAL_BOT_TOKEN, oauth_flow=get_oauth_flow(), ) -def handler(event, context): +def handler(event: dict, context: dict) -> dict: + """AWS Lambda entry point. + + Receives an API Gateway proxy event. Federation API paths + (``/api/federation/*``) are handled directly; everything else + is delegated to the Slack Bolt request handler. + """ + path = event.get("path", "") or event.get("rawPath", "") + if path.startswith("/api/federation"): + return _lambda_federation_handler(event) + slack_request_handler = SlackRequestHandler(app=app) return slack_request_handler.handle(event, context) -def main_response(body, logger, client, ack, context): - ack() - logger.info(json.dumps(body, indent=4)) +def _lambda_federation_handler(event: dict) -> dict: + """Handle a federation API request inside Lambda.""" + method = event.get("httpMethod", "GET") + path = event.get("path", "") + body_str = event.get("body", "") or "" + raw_headers = event.get("headers", {}) or {} + headers = {k: v for k, v in raw_headers.items()} + + status, resp = dispatch_federation_request(method, path, body_str, headers) + return { + "statusCode": status, + "headers": {"Content-Type": "application/json"}, + "body": json.dumps(resp), + } + + +_logger = logging.getLogger(__name__) + + +def main_response(body: dict, logger, client, ack, context: dict) -> None: + """Central dispatcher for every Slack request. + + Acknowledges the request immediately (required by Slack's 3-second + timeout), then resolves the ``(request_type, request_id)`` pair to + a handler function via :data:`MAIN_MAPPER` and invokes it. 
+ + A unique correlation ID is assigned to every incoming request and + attached to all log entries emitted while processing it. + """ + set_correlation_id() request_type, request_id = get_request_type(body) - run_function = safe_get(safe_get(MAIN_MAPPER, request_type), request_id) + + # Most requests are acked immediately. Certain view_submission + # handlers need to control the ack themselves (e.g. to respond with + # response_action="update" for multi-step modals). For those, we + # defer the ack and expose it via context["ack"]. + defer_ack = request_type == "view_submission" and request_id in _DEFERRED_ACK_VIEWS + ack_called = False + + if defer_ack: + def _tracked_ack(*args, **kwargs): + nonlocal ack_called + ack_called = True + return ack(*args, **kwargs) + context["ack"] = _tracked_ack + else: + ack() + + _logger.info( + "request_received", + extra={ + "request_type": request_type, + "request_id": request_id, + "team_id": safe_get(body, "team_id"), + }, + ) + _logger.debug("request_body", extra={"body": json.dumps(_redact_sensitive(body))}) + + run_function = MAIN_MAPPER.get(request_type, {}).get(request_id) if run_function: - run_function(body, client, logger, context) + try: + run_function(body, client, logger, context) + if defer_ack and not ack_called: + ack() + emit_metric( + "request_handled", + duration_ms=round(get_request_duration_ms(), 1), + request_type=request_type, + request_id=request_id, + ) + except Exception: + if defer_ack and not ack_called: + ack() + emit_metric( + "request_error", + request_type=request_type, + request_id=request_id, + ) + raise else: - logger.error( - f"no handler for path: {safe_get(safe_get(MAIN_MAPPER, request_type), request_id) or request_type+', '+request_id}" + if defer_ack and not ack_called: + ack() + _logger.error( + "no_handler", + extra={ + "request_type": request_type, + "request_id": request_id, + }, ) @@ -53,8 +202,52 @@ def main_response(body, logger, client, ack, context): 
app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) app.view(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) -app.command(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) if __name__ == "__main__": + if LOCAL_DEVELOPMENT: + import threading + from http.server import BaseHTTPRequestHandler, HTTPServer + + class FederationHTTPHandler(BaseHTTPRequestHandler): + """Lightweight HTTP handler for federation API endpoints.""" + + def do_GET(self): + if self.path.startswith("/api/federation"): + self._handle_federation("GET") + else: + self.send_error(404) + + def do_POST(self): + if self.path.startswith("/api/federation"): + self._handle_federation("POST") + else: + self.send_error(404) + + _MAX_BODY = 1_048_576 # 1 MB + + def _handle_federation(self, method: str): + try: + content_len = min(int(self.headers.get("Content-Length", 0)), self._MAX_BODY) + except (TypeError, ValueError): + content_len = 0 + body_str = self.rfile.read(content_len).decode() if content_len else "" + headers = {k: v for k, v in self.headers.items()} + + status, resp = dispatch_federation_request(method, self.path, body_str, headers) + + self.send_response(status) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(json.dumps(resp).encode()) + + def log_message(self, format, *args): + pass + + if FEDERATION_ENABLED: + fed_server = HTTPServer(("0.0.0.0", 3001), FederationHTTPHandler) + fed_thread = threading.Thread(target=fed_server.serve_forever, daemon=True) + fed_thread.start() + _logger.info("Federation API server started on port 3001") + app.start(3000) diff --git a/syncbot/builders/__init__.py b/syncbot/builders/__init__.py new file mode 100644 index 0000000..e47b0de --- /dev/null +++ b/syncbot/builders/__init__.py @@ -0,0 +1,48 @@ +"""Builders package – Slack modal and home-tab UI constructors. 
+ +Re-exports every public symbol so that ``import builders`` / +``from builders import X`` continues to work after the split. +""" + +from builders._common import ( + _format_channel_ref, + _get_group_members, + _get_groups_for_workspace, + _get_workspace_info, +) +from builders.channel_sync import ( + _build_inline_channel_sync, +) +from builders.home import ( + _REFRESH_BUTTON_BLOCK_INDEX, + _home_tab_content_hash, + build_home_tab, + refresh_home_tab_for_workspace, +) +from builders.sync import build_join_sync_form, build_new_sync_form +from builders.user_mapping import ( + _USER_MAPPING_REFRESH_BUTTON_INDEX, + _user_mapping_content_hash, + build_user_mapping_edit_modal, + build_user_mapping_screen, + build_user_matching_entry, +) + +__all__ = [ + "_build_inline_channel_sync", + "_format_channel_ref", + "_get_group_members", + "_get_groups_for_workspace", + "_get_workspace_info", + "_REFRESH_BUTTON_BLOCK_INDEX", + "_home_tab_content_hash", + "build_home_tab", + "build_join_sync_form", + "build_new_sync_form", + "_USER_MAPPING_REFRESH_BUTTON_INDEX", + "_user_mapping_content_hash", + "build_user_mapping_edit_modal", + "build_user_mapping_screen", + "build_user_matching_entry", + "refresh_home_tab_for_workspace", +] diff --git a/syncbot/builders/_common.py b/syncbot/builders/_common.py new file mode 100644 index 0000000..a871bfa --- /dev/null +++ b/syncbot/builders/_common.py @@ -0,0 +1,158 @@ +"""Shared helpers for builder modules.""" + +import logging + +from slack_sdk.web import WebClient + +import helpers +from db import DbManager +from db.schemas import Workspace, WorkspaceGroup, WorkspaceGroupMember +from helpers import get_user_id_from_body, is_user_authorized, safe_get + +_logger = logging.getLogger(__name__) + + +def _get_user_id(body: dict) -> str | None: + """Extract the acting user ID from any Slack request body.""" + return safe_get(body, "event", "user") or safe_get(body, "user", "id") or safe_get(body, "user_id") + + +def _get_team_id(body: dict) -> 
str | None: + """Extract the team ID from any Slack request body.""" + return ( + safe_get(body, "view", "team_id") + or safe_get(body, "team_id") + or safe_get(body, "team", "id") + or safe_get(body, "event", "view", "team_id") + ) + + +def _deny_unauthorized(body: dict, client: WebClient, logger) -> bool: + """Check authorization and send an ephemeral denial if the user is not an admin. + + Returns *True* if the user was denied (caller should return early). + """ + user_id = get_user_id_from_body(body) + if not user_id: + logger.warning("authorization_denied: could not determine user_id from request body") + return True + + if is_user_authorized(client, user_id): + return False + + channel_id = safe_get(body, "channel_id") or safe_get(body, "channel", "id") + _logger.warning( + "authorization_denied", + extra={"user_id": user_id, "action": "config"}, + ) + + if channel_id: + try: + client.chat_postEphemeral( + channel=channel_id, + user=user_id, + text=":lock: Only workspace admins and owners can configure SyncBot.", + ) + except Exception: + _logger.debug("Could not send ephemeral denial — user may have invoked from a modal") + + return True + + +def _get_groups_for_workspace(workspace_id: int) -> list[tuple[WorkspaceGroup, WorkspaceGroupMember]]: + """Return all active groups the workspace belongs to, with membership info.""" + members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + results: list[tuple[WorkspaceGroup, WorkspaceGroupMember]] = [] + for m in members: + groups = DbManager.find_records( + WorkspaceGroup, + [WorkspaceGroup.id == m.group_id, WorkspaceGroup.status == "active"], + ) + if groups: + results.append((groups[0], m)) + return results + + +def _get_group_members(group_id: int) -> list[WorkspaceGroupMember]: + """Return all active members of a group.""" + return DbManager.find_records( + 
WorkspaceGroupMember, + [ + WorkspaceGroupMember.group_id == group_id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + +def _get_workspace_info(workspace: Workspace) -> dict: + """Fetch workspace icon URL and domain from the Slack API (cached 24h).""" + result: dict[str, str | None] = {"icon_url": None, "domain": None} + if not workspace or not workspace.bot_token: + return result + + cache_key = f"ws_info:{workspace.id}" + cached = helpers._cache_get(cache_key) + if cached: + return cached + + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + info = ws_client.team_info() + result["icon_url"] = helpers.safe_get(info, "team", "icon", "image_88") or helpers.safe_get( + info, "team", "icon", "image_68" + ) + domain = helpers.safe_get(info, "team", "domain") + if domain: + result["domain"] = f"" + helpers._cache_set(cache_key, result, ttl=86400) + except Exception as exc: + _logger.debug(f"_get_workspace_meta: team_info call failed: {exc}") + return result + + +def _format_channel_ref( + channel_id: str, + workspace: Workspace, + is_local: bool = True, + *, + include_workspace_in_link: bool = True, +) -> str: + """Format a channel reference for display in Block Kit mrkdwn.""" + if is_local: + return f"<#{channel_id}>" + + ws_name = workspace.workspace_name if workspace and workspace.workspace_name else "Partner" + + if not workspace or not workspace.bot_token: + return f"#{channel_id} ({ws_name})" if include_workspace_in_link else f"#{channel_id}" + + cache_key = f"chan_ref:{channel_id}:{include_workspace_in_link}" + cached = helpers._cache_get(cache_key) + if cached: + return cached + + ch_name = channel_id + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + info = ws_client.conversations_info(channel=channel_id) + ch_name = helpers.safe_get(info, "channel", "name") or channel_id + except Exception as e: + _logger.warning( + 
"format_channel_ref_failed", + extra={"channel_id": channel_id, "workspace": ws_name, "error": str(e)}, + ) + + deep_link = f"https://slack.com/app_redirect?channel={channel_id}&team={workspace.team_id}" + link_text = f"#{ch_name} ({ws_name})" if include_workspace_in_link else f"#{ch_name}" + result = f"<{deep_link}|{link_text}>" + if ch_name != channel_id: + helpers._cache_set(cache_key, result, ttl=3600) + return result diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py new file mode 100644 index 0000000..4ba44f5 --- /dev/null +++ b/syncbot/builders/channel_sync.py @@ -0,0 +1,179 @@ +"""Channel sync form builders.""" + +import logging +from logging import Logger + +from slack_sdk.web import WebClient + +import helpers +from builders._common import ( + _deny_unauthorized, + _format_channel_ref, +) +from db import DbManager +from db.schemas import PostMeta, Sync, SyncChannel, Workspace, WorkspaceGroup, WorkspaceGroupMember +from helpers import safe_get +from slack import actions, orm +from slack.blocks import context, section + +_logger = logging.getLogger(__name__) + + +def _build_inline_channel_sync( + blocks: list, + group: WorkspaceGroup, + workspace_record: Workspace, + other_members: list[WorkspaceGroupMember], + context: dict | None = None, +) -> None: + """Append channel-sync blocks inline under a group on the Home tab. 
+ + Shows: + - Active synced channels with Pause/Stop buttons + - Paused synced channels with Resume/Stop buttons + - Channels waiting for a subscriber with Unpublish button + - Available channels from other members with Start Syncing button + """ + syncs_for_group = DbManager.find_records( + Sync, + [Sync.group_id == group.id], + ) + + other_ws_ids = {m.workspace_id for m in other_members if m.workspace_id} + + published_syncs: list[tuple[Sync, SyncChannel, list[SyncChannel], bool]] = [] + waiting_syncs: list[tuple[Sync, SyncChannel]] = [] + available_syncs: list[tuple[Sync, list[SyncChannel]]] = [] + + for sync in syncs_for_group: + channels = DbManager.find_records( + SyncChannel, + [SyncChannel.sync_id == sync.id, SyncChannel.deleted_at.is_(None)], + ) + my_channel = next((c for c in channels if c.workspace_id == workspace_record.id), None) + other_channels = [c for c in channels if c.workspace_id != workspace_record.id] + + if my_channel and other_channels: + is_paused = my_channel.status == "paused" or any(c.status == "paused" for c in other_channels) + published_syncs.append((sync, my_channel, other_channels, is_paused)) + elif my_channel and not other_channels: + waiting_syncs.append((sync, my_channel)) + elif not my_channel and other_channels: + if sync.sync_mode == "direct" and sync.target_workspace_id != workspace_record.id: + continue + available_syncs.append((sync, other_channels)) + + published_syncs.sort(key=lambda t: (t[0].title or "").lower()) + waiting_syncs.sort(key=lambda t: (t[0].title or "").lower()) + available_syncs.sort(key=lambda t: (t[0].title or "").lower()) + + if not published_syncs and not waiting_syncs and not available_syncs: + return + + blocks.append(context("*Synced Channels*")) + + for sync, my_ch, other_chs, is_paused in published_syncs: + my_ref = _format_channel_ref(my_ch.channel_id, workspace_record, is_local=True) + + # Workspace names for bracket: local first, then others + local_name = 
helpers.resolve_workspace_name(workspace_record) or f"Workspace {workspace_record.id}" + other_names: list[str] = [] + for och in other_chs: + och_ws = helpers.get_workspace_by_id(och.workspace_id, context=context) + other_names.append(helpers.resolve_workspace_name(och_ws) if och_ws else f"Workspace {och.workspace_id}") + all_ws_names = [local_name] + other_names + + if sync.sync_mode == "direct": + mode_tag = f" _[1-to-1: {', '.join(all_ws_names)}]_" if all_ws_names else "" + else: + mode_tag = f" _[Any: {', '.join(all_ws_names)}]_" if all_ws_names else "" + + if is_paused: + icon = ":double_vertical_bar:" + toggle_btn = orm.ButtonElement( + label="Resume Syncing", + action=f"{actions.CONFIG_RESUME_SYNC}_{sync.id}", + value=str(sync.id), + ) + else: + icon = ":arrows_counterclockwise:" + toggle_btn = orm.ButtonElement( + label="Pause Syncing", + action=f"{actions.CONFIG_PAUSE_SYNC}_{sync.id}", + value=str(sync.id), + ) + + blocks.append( + section(f"{icon} {my_ref}{mode_tag}") + ) + + context_parts: list[str] = [] + if getattr(my_ch, "created_at", None): + context_parts.append(f"Synced since: {my_ch.created_at:%B %d, %Y}") + + msg_count = DbManager.count_records( + PostMeta, + [PostMeta.sync_channel_id == my_ch.id], + ) + context_parts.append(f"{msg_count} message{'s' if msg_count != 1 else ''} tracked") + + if context_parts: + blocks.append(context(" · ".join(context_parts))) + blocks.append( + orm.ActionsBlock( + elements=[ + toggle_btn, + orm.ButtonElement( + label="Stop Syncing", + action=f"{actions.CONFIG_STOP_SYNC}_{sync.id}", + value=str(sync.id), + style="danger", + ), + ] + ) + ) + + for sync, my_ch in waiting_syncs: + blocks.append( + section(f":outbox_tray: <#{my_ch.channel_id}> — _waiting for subscribers_") + ) + is_publisher = sync.publisher_workspace_id == workspace_record.id + if is_publisher: + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Unpublish", + action=f"{actions.CONFIG_UNPUBLISH_CHANNEL}_{my_ch.id}", + 
value=str(sync.id), + style="danger", + ), + ] + ) + ) + + for sync, other_chs in available_syncs: + publisher_ws = helpers.get_workspace_by_id(other_chs[0].workspace_id, context=context) if other_chs else None + publisher_name = helpers.resolve_workspace_name(publisher_ws) if publisher_ws else "another workspace" + sub_names_avail: list[str] = [] + for och in other_chs: + och_ws = helpers.get_workspace_by_id(och.workspace_id, context=context) + sub_names_avail.append(helpers.resolve_workspace_name(och_ws) if och_ws else f"Workspace {och.workspace_id}") + if sync.sync_mode == "direct": + mode_tag = f" _[1-to-1: {sub_names_avail[0]}]_" if sub_names_avail else "" + else: + mode_tag = f" _[Any: {', '.join(sub_names_avail)}]_" if sub_names_avail else "" + blocks.append( + section(f":inbox_tray: *{sync.title}* from {publisher_name}{mode_tag}") + ) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Start Syncing", + action=f"{actions.CONFIG_SUBSCRIBE_CHANNEL}_{sync.id}", + value=str(sync.id), + ), + ] + ) + ) diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py new file mode 100644 index 0000000..4cb7ece --- /dev/null +++ b/syncbot/builders/home.py @@ -0,0 +1,498 @@ +"""Home tab builder.""" + +import hashlib +import logging +from logging import Logger + +from slack_sdk.web import WebClient + +import constants +import helpers +from builders._common import ( + _get_group_members, + _get_groups_for_workspace, + _get_team_id, + _get_user_id, + _get_workspace_info, +) +from builders.channel_sync import _build_inline_channel_sync +from db import DbManager +from db.schemas import ( + FederatedWorkspace, + Sync, + SyncChannel, + UserMapping, + Workspace, + WorkspaceGroup, + WorkspaceGroupMember, +) +from slack import actions, orm +from slack.blocks import context as block_context, divider, header, section + +_logger = logging.getLogger(__name__) + +# Index of the Actions block that contains the Refresh button (after header at 0) 
+_REFRESH_BUTTON_BLOCK_INDEX = 1 + + +def _home_tab_content_hash(workspace_record: Workspace) -> str: + """Compute a stable hash of the data that drives the Home tab. + + Includes groups, members, syncs, sync channels (id/workspace/status), mapped counts, + and pending invite ids so the hash changes when anything visible on Home changes. + """ + workspace_id = workspace_record.id + workspace_name = (workspace_record.workspace_name or "") or "" + my_groups = _get_groups_for_workspace(workspace_id) + group_ids = sorted(g.id for g, _ in my_groups) + pending_invites = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_id, + WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + pending_ids = tuple(sorted(inv.id for inv in pending_invites)) + group_payload: list[tuple] = [] + for group, _ in my_groups: + members = _get_group_members(group.id) + syncs = DbManager.find_records(Sync, [Sync.group_id == group.id]) + sync_ids = [s.id for s in syncs] + # Sync channels drive the "Synced Channels" section + sync_channel_tuples: list[tuple] = [] + for sync in syncs: + channels = DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id == sync.id, + SyncChannel.deleted_at.is_(None), + ], + ) + channel_sig = tuple( + (sc.workspace_id, sc.channel_id, sc.status or "active") + for sc in sorted(channels, key=lambda c: (c.workspace_id, c.channel_id)) + ) + sync_channel_tuples.append((sync.id, channel_sig)) + sync_channel_tuples.sort(key=lambda x: x[0]) + # Per-member channel_count and mapped_count (shown in group section) + member_sigs: list[tuple] = [] + for m in members: + ws_id = m.workspace_id or 0 + ch_count = 0 + if ws_id and sync_ids: + ch_count = len( + DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id.in_(sync_ids), + SyncChannel.workspace_id == ws_id, + SyncChannel.deleted_at.is_(None), + ], + ) + ) + mapped_count = 0 + if ws_id: + mapped_count = len( + 
DbManager.find_records( + UserMapping, + [ + UserMapping.group_id == group.id, + UserMapping.target_workspace_id == ws_id, + UserMapping.match_method != "none", + ], + ) + ) + member_sigs.append((ws_id, ch_count, mapped_count)) + member_sigs.sort(key=lambda x: x[0]) + group_payload.append( + (group.id, len(members), len(syncs), tuple(sync_channel_tuples), tuple(member_sigs)) + ) + group_payload.sort(key=lambda x: x[0]) + payload = (workspace_id, workspace_name, tuple(group_ids), tuple(group_payload), pending_ids) + return hashlib.sha256(repr(payload).encode()).hexdigest() + + +def refresh_home_tab_for_workspace(workspace: Workspace, logger: Logger, context: dict | None = None) -> None: + """Publish an updated Home tab for every admin in *workspace*.""" + if not workspace or not workspace.bot_token or workspace.deleted_at: + return + ctx = context if context is not None else {} + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + admin_ids = helpers.get_admin_ids(ws_client, team_id=workspace.team_id, context=ctx) + except Exception as e: + _logger.warning(f"refresh_home_tab_for_workspace: failed to get admins: {e}") + return + + synthetic_body = {"team": {"id": workspace.team_id}} + for uid in admin_ids: + try: + build_home_tab(synthetic_body, ws_client, logger, ctx, user_id=uid) + except Exception as e: + _logger.warning( + "refresh_home_tab_for_workspace: failed for user %s in workspace %s: %s", + uid, + getattr(workspace, "team_id", workspace.id if workspace else None), + e, + ) + + +def build_home_tab( + body: dict, + client: WebClient, + logger: Logger, + context: dict, + *, + user_id: str | None = None, + return_blocks: bool = False, +) -> list[dict] | None: + """Build and publish the App Home tab. 
If return_blocks is True, return block dicts and do not publish.""" + team_id = _get_team_id(body) + user_id = user_id or _get_user_id(body) + if not team_id or not user_id: + _logger.warning("build_home_tab: missing team_id or user_id") + return None + + workspace_record: Workspace = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return None + + is_admin = helpers.is_user_authorized(client, user_id) + + blocks: list[orm.BaseBlock] = [] + + blocks.append(header("SyncBot Configuration")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":arrows_counterclockwise: Refresh", + action=actions.CONFIG_REFRESH_HOME, + ), + orm.ButtonElement( + label=":floppy_disk: Backup/Restore", + action=actions.CONFIG_BACKUP_RESTORE, + ), + ] + ) + ) + blocks.append(divider()) + + if not is_admin: + blocks.append(block_context(":lock: Only workspace admins and owners can configure SyncBot.")) + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + return None + + # Compute hash for admin view so we can update cache after publish (manual or automatic) + current_hash = _home_tab_content_hash(workspace_record) + + # ── Workspace Groups ────────────────────────────────────── + blocks.append(section(":busts_in_silhouette: *Workspace Groups*")) + blocks.append(block_context("Create or join groups to sync channels with other workspaces.")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":heavy_plus_sign: Create Group", + action=actions.CONFIG_CREATE_GROUP, + ), + orm.ButtonElement( + label=":link: Join Group", + action=actions.CONFIG_JOIN_GROUP, + ), + ] + ) + ) + + my_groups = _get_groups_for_workspace(workspace_record.id) + + pending_invites = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_record.id, + 
WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + if not my_groups and not pending_invites: + blocks.append( + block_context("_You are not in any groups yet. Create a new group or enter an invite code to join one._") + ) + else: + for group, my_membership in my_groups: + _build_group_section(blocks, group, my_membership, workspace_record, context) + + for invite in pending_invites: + _build_pending_invite_section(blocks, invite, context) + + # ── External Connections (federation) ───────────────────── + if constants.FEDERATION_ENABLED: + _build_federation_section(blocks, workspace_record) + + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + # Update cache so next manual Refresh skips full rebuild when data unchanged + helpers.refresh_after_full( + f"home_tab_hash:{team_id}", + f"home_tab_blocks:{team_id}:{user_id}", + f"refresh_at:home:{team_id}:{user_id}", + current_hash, + block_dicts, + ) + return None + + +def _build_pending_invite_section( + blocks: list, + invite: WorkspaceGroupMember, + context: dict | None = None, +) -> None: + """Append blocks for an incoming group invite the workspace hasn't responded to yet.""" + group = DbManager.get_record(WorkspaceGroup, id=invite.group_id) + if not group: + return + + inviting_members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.group_id == group.id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + inviter_names = [] + for m in inviting_members: + if m.workspace_id: + ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + inviter_names.append(helpers.resolve_workspace_name(ws) if ws else f"Workspace {m.workspace_id}") + + from_label = f" from {', '.join(inviter_names)}" if inviter_names else "" + + blocks.append(divider()) + 
blocks.append( + section(f":envelope: *{group.name}*{from_label}\n_You've been invited to join this group_") + ) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Accept", + action=f"{actions.CONFIG_ACCEPT_GROUP_REQUEST}_{invite.id}", + value=str(invite.id), + style="primary", + ), + orm.ButtonElement( + label="Decline", + action=f"{actions.CONFIG_DECLINE_GROUP_REQUEST}_{invite.id}", + value=str(invite.id), + style="danger", + ), + ] + ) + ) + + +def _build_group_section( + blocks: list, + group: WorkspaceGroup, + my_membership: WorkspaceGroupMember, + workspace_record: Workspace, + context: dict | None = None, +) -> None: + """Append blocks for a single workspace group.""" + blocks.append(divider()) + + all_members = _get_group_members(group.id) + other_members = [m for m in all_members if m.workspace_id != workspace_record.id] + + role_tag = " _(creator)_" if my_membership.role == "creator" else "" + icon = ":link:" if len(other_members) > 0 else ":handshake:" + label_text = f"{icon} *{group.name}*{role_tag}" + + blocks.append(section(label_text)) + + syncs_for_group = DbManager.find_records(Sync, [Sync.group_id == group.id]) + sync_ids = [s.id for s in syncs_for_group] + + for m in all_members: + if m.workspace_id: + ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + name = helpers.resolve_workspace_name(ws) if ws else f"Workspace {m.workspace_id}" + if m.workspace_id == workspace_record.id: + name += " _(you)_" + elif m.federated_workspace_id: + fed_ws = DbManager.get_record(FederatedWorkspace, id=m.federated_workspace_id) + name = f":globe_with_meridians: {fed_ws.name}" if fed_ws and fed_ws.name else "External" + else: + name = "Unknown" + + joined_str = f"{m.joined_at:%B %d, %Y}" if m.joined_at else "Unknown" + + ws_id = m.workspace_id + channel_count = 0 + if ws_id and sync_ids: + channels = DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id.in_(sync_ids), + SyncChannel.workspace_id == ws_id, + 
SyncChannel.deleted_at.is_(None), + ], + ) + channel_count = len(channels) + + mapped_count = 0 + if ws_id: + mapped = DbManager.find_records( + UserMapping, + [ + UserMapping.group_id == group.id, + UserMapping.target_workspace_id == ws_id, + UserMapping.match_method != "none", + ], + ) + mapped_count = len(mapped) + + stats = ( + f"Member since {joined_str}" + f" · {channel_count} synced channel{'s' if channel_count != 1 else ''}" + f" · {mapped_count} mapped user{'s' if mapped_count != 1 else ''}" + ) + blocks.append(block_context(f"*{name}*\n{stats}")) + + pending_members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.group_id == group.id, + WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for pm in pending_members: + if pm.workspace_id: + pw = helpers.get_workspace_by_id(pm.workspace_id, context=context) + pname = helpers.resolve_workspace_name(pw) if pw else f"Workspace {pm.workspace_id}" + else: + pname = "Unknown" + blocks.append(block_context(f":hourglass_flowing_sand: *{pname}* — _Pending invite_")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Cancel Invite", + action=f"{actions.CONFIG_CANCEL_GROUP_REQUEST}_{pm.id}", + value=str(pm.id), + ), + ] + ) + ) + + # Action buttons for this group + group_actions: list[orm.ButtonElement] = [ + orm.ButtonElement( + label="Invite Workspace", + action=actions.CONFIG_INVITE_WORKSPACE, + value=str(group.id), + ), + orm.ButtonElement( + label="Publish Channel", + action=actions.CONFIG_PUBLISH_CHANNEL, + value=str(group.id), + ), + orm.ButtonElement( + label="User Mapping", + action=actions.CONFIG_MANAGE_USER_MATCHING, + value=str(group.id), + ), + ] + group_actions.append( + orm.ButtonElement( + label="Leave Group", + action=f"{actions.CONFIG_LEAVE_GROUP}_{group.id}", + style="danger", + value=str(group.id), + ), + ) + blocks.append(orm.ActionsBlock(elements=group_actions)) + + 
_build_inline_channel_sync(blocks, group, workspace_record, other_members, context) + + +def _build_federation_section( + blocks: list, + workspace_record: Workspace, +) -> None: + """Append the federation section to the home tab.""" + blocks.append(divider()) + blocks.append(block_context("\u200b")) + blocks.append(section("*External Connections*")) + blocks.append(block_context("Connect with Workspaces on other SyncBot deployments.")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":globe_with_meridians: Generate Connection Code", + action=actions.CONFIG_GENERATE_FEDERATION_CODE, + ), + orm.ButtonElement( + label=":link: Enter Connection Code", + action=actions.CONFIG_ENTER_FEDERATION_CODE, + ), + orm.ButtonElement( + label=":package: Data Migration", + action=actions.CONFIG_DATA_MIGRATION, + ), + ] + ) + ) + + fed_members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.federated_workspace_id.isnot(None), + WorkspaceGroupMember.deleted_at.is_(None), + WorkspaceGroupMember.status == "active", + ], + ) + + shown_fed: set[int] = set() + for fm in fed_members: + if not fm.federated_workspace_id or fm.federated_workspace_id in shown_fed: + continue + my_groups = _get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + if fm.group_id not in my_group_ids: + continue + + shown_fed.add(fm.federated_workspace_id) + fed_ws = DbManager.get_record(FederatedWorkspace, id=fm.federated_workspace_id) + if not fed_ws: + continue + + fed_ws_name = fed_ws.name or f"Connection {fed_ws.instance_id[:8]}" + status_icon = ":white_check_mark:" if fed_ws.status == "active" else ":warning:" + + blocks.append(block_context("\u200b")) + label_text = f"{status_icon} *{fed_ws_name}*" + label_text += f"\n:globe_with_meridians: {fed_ws.webhook_url}" + blocks.append(section(label_text)) + + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Remove Connection", + 
action=f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_{fm.id}", + style="danger", + value=str(fm.id), + ), + ] + ) + ) diff --git a/syncbot/builders/sync.py b/syncbot/builders/sync.py new file mode 100644 index 0000000..1450ff3 --- /dev/null +++ b/syncbot/builders/sync.py @@ -0,0 +1,95 @@ +"""Join/New sync form builders.""" + +import copy +import logging + +from slack_sdk.web import WebClient + +import helpers +from builders._common import _deny_unauthorized, _get_group_members, _get_groups_for_workspace +from db import DbManager +from db.schemas import Sync, SyncChannel, Workspace +from helpers import safe_get +from slack import actions, forms, orm + +_logger = logging.getLogger(__name__) + + +def build_join_sync_form( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Pushes a new modal layer to join an existing sync.""" + if _deny_unauthorized(body, client, logger): + return + + trigger_id: str = safe_get(body, "trigger_id") + team_id = safe_get(body, "view", "team_id") + join_sync_form: orm.BlockView = copy.deepcopy(forms.JOIN_SYNC_FORM) + + workspace_record: Workspace = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + my_groups = _get_groups_for_workspace(workspace_record.id) + group_ws_ids: set[int] = {workspace_record.id} + for group, _ in my_groups: + for m in _get_group_members(group.id): + if m.workspace_id: + group_ws_ids.add(m.workspace_id) + + channel_sync_workspace_records: list[tuple[SyncChannel, Workspace]] = DbManager.find_join_records2( + left_cls=SyncChannel, + right_cls=Workspace, + filters=[Workspace.team_id == team_id, SyncChannel.deleted_at.is_(None)], + ) + already_joined_sync_ids = {record[0].sync_id for record in channel_sync_workspace_records} + + all_syncs: list[Sync] = DbManager.find_records(Sync, [True]) + eligible_syncs: list[Sync] = [] + + for sync in all_syncs: + if sync.id in already_joined_sync_ids: + continue + sync_channels = DbManager.find_records( 
+ SyncChannel, + [SyncChannel.sync_id == sync.id, SyncChannel.deleted_at.is_(None)], + ) + if any(sc.workspace_id in group_ws_ids for sc in sync_channels): + eligible_syncs.append(sync) + + options = orm.as_selector_options( + [sync.title for sync in eligible_syncs], + [str(sync.id) for sync in eligible_syncs], + ) + join_sync_form.set_options({actions.CONFIG_JOIN_SYNC_SELECT: options}) + join_sync_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, + title_text="Join Sync", + new_or_add="new", + ) + + +def build_new_sync_form( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Pushes a new modal layer to create a new sync.""" + if _deny_unauthorized(body, client, logger): + return + + trigger_id: str = safe_get(body, "trigger_id") + new_sync_form: orm.BlockView = copy.deepcopy(forms.NEW_SYNC_FORM) + new_sync_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_NEW_SYNC_SUBMIT, + title_text="New Sync", + new_or_add="new", + ) diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py new file mode 100644 index 0000000..c387d3a --- /dev/null +++ b/syncbot/builders/user_mapping.py @@ -0,0 +1,350 @@ +"""User mapping screen builders.""" + +import contextlib # noqa: I001 +import hashlib +import logging + +from slack_sdk.web import WebClient + +import helpers +from builders._common import ( + _deny_unauthorized, + _get_group_members, + _get_groups_for_workspace, + _get_team_id, + _get_user_id, +) +from db import DbManager +from db.schemas import UserDirectory, UserMapping, Workspace, WorkspaceGroup +from slack import actions, orm +from slack.blocks import actions as blocks_actions, button, context as block_context, divider, header, section + +_logger = logging.getLogger(__name__) + +# Index of the Actions block that contains the Refresh button (after header at 0) +_USER_MAPPING_REFRESH_BUTTON_INDEX = 1 + + +def 
_user_mapping_content_hash(workspace_record: Workspace, group_id: int | None) -> str: + """Compute a stable hash of the data that drives the user mapping screen (minimal DB).""" + workspace_id = workspace_record.id + gid = group_id or 0 + if gid: + members = _get_group_members(gid) + linked_workspace_ids = { + m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_id + } + else: + my_groups = _get_groups_for_workspace(workspace_id) + linked_workspace_ids = set() + for g, _ in my_groups: + for m in _get_group_members(g.id): + if m.workspace_id and m.workspace_id != workspace_id: + linked_workspace_ids.add(m.workspace_id) + + all_mappings: list[UserMapping] = [] + for source_ws_id in linked_workspace_ids: + mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == source_ws_id, + UserMapping.target_workspace_id == workspace_id, + ], + ) + all_mappings.extend(mappings) + + payload = (workspace_id, gid, tuple((m.id, m.match_method, m.target_user_id) for m in sorted(all_mappings, key=lambda x: x.id))) + return hashlib.sha256(repr(payload).encode()).hexdigest() + + +def build_user_matching_entry( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Entry point when user clicks "User Mapping" on the Home tab.""" + if _deny_unauthorized(body, client, logger): + return + + raw_value = helpers.safe_get(body, "actions", 0, "value") + group_id = None + if raw_value: + with contextlib.suppress(TypeError, ValueError): + group_id = int(raw_value) + + user_id = _get_user_id(body) + team_id = _get_team_id(body) + if not user_id or not team_id: + return + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + build_user_mapping_screen(client, workspace_record, user_id, group_id=group_id) + + +def build_user_mapping_screen( + client: WebClient, + workspace_record: Workspace, + user_id: str, + *, + group_id: int | None = None, + 
context: dict | None = None, + return_blocks: bool = False, +) -> list | None: + """Publish the user mapping screen on the Home tab. If return_blocks is True, return block dicts and do not publish.""" + group_name = "Group" + if group_id: + groups = DbManager.find_records(WorkspaceGroup, [WorkspaceGroup.id == group_id]) + if groups: + group_name = groups[0].name + + if group_id: + members = _get_group_members(group_id) + linked_workspace_ids = { + m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_record.id + } + else: + my_groups = _get_groups_for_workspace(workspace_record.id) + linked_workspace_ids: set[int] = set() + for g, _ in my_groups: + for m in _get_group_members(g.id): + if m.workspace_id and m.workspace_id != workspace_record.id: + linked_workspace_ids.add(m.workspace_id) + + all_mappings: list[UserMapping] = [] + for source_ws_id in linked_workspace_ids: + mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == source_ws_id, + UserMapping.target_workspace_id == workspace_record.id, + ], + ) + all_mappings.extend(mappings) + + unmapped = [m for m in all_mappings if m.target_user_id is None or m.match_method == "none"] + soft_matched = [m for m in all_mappings if m.match_method in ("name", "manual") and m.target_user_id is not None] + email_matched = [m for m in all_mappings if m.match_method == "email" and m.target_user_id is not None] + + _ws_name_lookup: dict[int, str] = {} + for source_ws_id in linked_workspace_ids: + ws = helpers.get_workspace_by_id(source_ws_id, context=context) + if ws: + _ws_name_lookup[source_ws_id] = helpers.resolve_workspace_name(ws) or "" + + def _display_for_mapping(m: UserMapping, ws_lookup: dict[int, str]) -> str: + """Formatted display string: normalized name + workspace in parens if present.""" + display = helpers.normalize_display_name(m.source_display_name or m.source_user_id) + ws_label = ws_lookup.get(m.source_workspace_id, "") + return f"{display} 
({ws_label})" if ws_label else display + + unmapped.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + soft_matched.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + email_matched.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + + _email_lookup: dict[tuple[int, str], str] = {} + _avatar_lookup: dict[tuple[int, str], str] = {} + for source_ws_id in linked_workspace_ids: + ws = helpers.get_workspace_by_id(source_ws_id, context=context) + partner_client = None + if ws and ws.bot_token: + with contextlib.suppress(Exception): + partner_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + dir_entries = DbManager.find_records( + UserDirectory, + [UserDirectory.workspace_id == source_ws_id, UserDirectory.deleted_at.is_(None)], + ) + for entry in dir_entries: + if entry.email: + _email_lookup[(source_ws_id, entry.slack_user_id)] = entry.email + if partner_client: + with contextlib.suppress(Exception): + _, avatar_url = helpers.get_user_info(partner_client, entry.slack_user_id) + if avatar_url: + _avatar_lookup[(source_ws_id, entry.slack_user_id)] = avatar_url + + def _user_context_block(mapping: UserMapping, label_text: str) -> orm.ContextBlock: + avatar_url = _avatar_lookup.get((mapping.source_workspace_id, mapping.source_user_id)) + elements: list = [] + if avatar_url: + elements.append( + orm.ImageContextElement( + image_url=avatar_url, + alt_text=mapping.source_display_name or "user", + ) + ) + elements.append(orm.ContextElement(initial_value=label_text)) + return orm.ContextBlock(elements=elements) + + group_val = str(group_id) if group_id else "0" + blocks: list[orm.BaseBlock] = [ + header(f"User Mapping — {group_name}"), + blocks_actions( + button(":arrow_left: Back", actions.CONFIG_USER_MAPPING_BACK, value=group_val), + button(":arrows_counterclockwise: Refresh", actions.CONFIG_USER_MAPPING_REFRESH, value=group_val), + ), + block_context(f":busts_in_silhouette: *{len(soft_matched) + 
len(email_matched)} mapped* \u00b7 *{len(unmapped)} unmapped*"), + divider(), + ] + + if unmapped: + blocks.append(section(":warning: *Unmapped Users*")) + blocks.append(block_context("\u200b")) + for m in unmapped: + blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}*")) + blocks.append(blocks_actions(button("Edit", f"{actions.CONFIG_USER_MAPPING_EDIT}_{m.id}", value=group_val))) + blocks.append(divider()) + + if soft_matched: + blocks.append(section(":pencil2: *Soft / Manual Matches*")) + blocks.append(block_context("\u200b")) + for m in soft_matched: + method_tag = "manual" if m.match_method == "manual" else "name" + blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> _[{method_tag}]_")) + blocks.append(blocks_actions(button("Edit", f"{actions.CONFIG_USER_MAPPING_EDIT}_{m.id}", value=group_val))) + blocks.append(divider()) + + if email_matched: + blocks.append(section(":lock: *Email Matches*")) + blocks.append(block_context("\u200b")) + for m in email_matched: + email_addr = _email_lookup.get((m.source_workspace_id, m.source_user_id), "") + email_tag = f"_{email_addr}_" if email_addr else "_[email]_" + blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> {email_tag}")) + blocks.append(divider()) + + if not unmapped and not soft_matched and not email_matched: + blocks.append(block_context("_No user mappings yet. 
Mappings are created automatically when " + "workspaces join a group and users share display names or emails._")) + + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + return None + + +def build_user_mapping_edit_modal( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Open a modal to edit a single user mapping.""" + if _deny_unauthorized(body, client, logger): + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + action_id = helpers.safe_get(body, "actions", 0, "action_id") or "" + mapping_id_str = action_id.replace(actions.CONFIG_USER_MAPPING_EDIT + "_", "") + try: + mapping_id = int(mapping_id_str) + except (TypeError, ValueError): + _logger.warning(f"build_user_mapping_edit_modal: invalid mapping_id: {mapping_id_str}") + return + + raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" + try: + group_id = int(raw_group) + except (TypeError, ValueError): + group_id = 0 + + mapping = DbManager.get_record(UserMapping, id=mapping_id) + if not mapping: + _logger.warning(f"build_user_mapping_edit_modal: mapping {mapping_id} not found") + return + + team_id = _get_team_id(body) + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if not workspace_record: + return + + source_ws = helpers.get_workspace_by_id(mapping.source_workspace_id) + source_ws_name = helpers.resolve_workspace_name(source_ws) if source_ws else "Partner" + display = helpers.normalize_display_name(mapping.source_display_name or mapping.source_user_id) + + existing_mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == mapping.source_workspace_id, + UserMapping.target_workspace_id == mapping.target_workspace_id, + UserMapping.target_user_id.isnot(None), + UserMapping.match_method != "none", + 
UserMapping.id != mapping.id, + ], + ) + taken_target_ids = {m.target_user_id for m in existing_mappings} + + directory = DbManager.find_records( + UserDirectory, + [UserDirectory.workspace_id == workspace_record.id, UserDirectory.deleted_at.is_(None)], + ) + directory.sort(key=lambda u: (u.display_name or u.real_name or u.slack_user_id).lower()) + + has_mapping = mapping.target_user_id is not None and mapping.match_method != "none" + options: list[orm.SelectorOption] = [] + if has_mapping: + options.append(orm.SelectorOption(name="\u274c Remove Mapping", value="__remove__")) + for entry in directory: + if entry.slack_user_id in taken_target_ids: + continue + label = entry.display_name or entry.real_name or entry.slack_user_id + if entry.email: + label = f"{label} ({entry.email})" + if len(label) > 75: + label = label[:72] + "..." + options.append(orm.SelectorOption(name=label, value=entry.slack_user_id)) + + initial_value = None + if mapping.target_user_id and mapping.match_method != "none": + initial_value = mapping.target_user_id + + avatar_accessory = None + if source_ws and source_ws.bot_token: + with contextlib.suppress(Exception): + partner_client = WebClient(token=helpers.decrypt_bot_token(source_ws.bot_token)) + _, avatar_url = helpers.get_user_info(partner_client, mapping.source_user_id) + if avatar_url: + avatar_accessory = orm.ImageAccessoryElement(image_url=avatar_url, alt_text=display) + + blocks: list[orm.BaseBlock] = [ + orm.SectionBlock(label=f"*{display}*\n_{source_ws_name}_", element=avatar_accessory), + ] + if mapping.target_user_id and mapping.match_method != "none": + blocks.append(block_context(f"Currently mapped to <@{mapping.target_user_id}> _[{mapping.match_method}]_")) + blocks.append(divider()) + if options: + blocks.append( + orm.InputBlock( + label="Map to user", + action=actions.CONFIG_USER_MAPPING_EDIT_SELECT, + element=orm.StaticSelectElement( + placeholder="Select a user...", + options=options, + initial_value=initial_value, + ), + 
optional=True, + ) + ) + else: + blocks.append(block_context("_No available users to map to. All users in your workspace " + "are already mapped to other users._")) + + meta = {"mapping_id": mapping_id, "group_id": group_id or 0} + modal_form = orm.BlockView(blocks=blocks) + modal_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_USER_MAPPING_EDIT_SUBMIT, + title_text="Edit Mapping", + submit_button_text="Save", + close_button_text="Cancel", + parent_metadata=meta, + new_or_add="new", + ) diff --git a/syncbot/constants.py b/syncbot/constants.py new file mode 100644 index 0000000..d11014f --- /dev/null +++ b/syncbot/constants.py @@ -0,0 +1,147 @@ +"""Application constants and startup configuration validation. + +This module defines the **names** of all environment variables the app +reads at runtime (the actual *values* come from ``os.environ``). It +also provides :func:`validate_config` which should be called once at +startup to fail fast on missing configuration. +""" + +import logging +import os + +_logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Environment-variable name constants +# +# Each value is the *name* of the env var, not its value. The actual values +# are read from os.environ at runtime. 
# ---------------------------------------------------------------------------
# Environment-variable name constants
#
# Each value below is the *name* of an environment variable, not its value.
# The actual values are read from ``os.environ`` at runtime.
# ---------------------------------------------------------------------------

_logger = logging.getLogger(__name__)

SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN"
SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME"
SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME"
SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID"
SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET"
SLACK_SCOPES = "ENV_SLACK_SCOPES"
SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET"
PASSWORD_ENCRYPT_KEY = "PASSWORD_ENCRYPT_KEY"
REQUIRE_ADMIN = "REQUIRE_ADMIN"

DATABASE_HOST = "DATABASE_HOST"
ADMIN_DATABASE_USER = "ADMIN_DATABASE_USER"
ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD"
ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA"

# When set to "true", app startup drops the database and reinitializes from
# db/init.sql. All data is lost.
DANGER_DROP_AND_INIT_DB = "DANGER_DROP_AND_INIT_DB"

# Evaluated once at import time; later changes to the env var are not observed.
LOCAL_DEVELOPMENT = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true"

# Placeholder used by sample manifests/env files; never a usable token.
_BOT_TOKEN_PLACEHOLDER = "xoxb-0-0"


def _has_real_bot_token() -> bool:
    """Return *True* if SLACK_BOT_TOKEN looks like a genuine Slack token."""
    token = os.environ.get(SLACK_BOT_TOKEN, "").strip()
    return token.startswith("xoxb-") and token != _BOT_TOKEN_PLACEHOLDER


# Snapshot taken at import time (env changes after import are not observed).
HAS_REAL_BOT_TOKEN: bool = _has_real_bot_token()

WARNING_BLOCK = "WARNING_BLOCK"

# Size limit applied to HEIF image handling — assumes pixels; TODO confirm
# the unit against the image-conversion caller.
MAX_HEIF_SIZE = 1000

# ---------------------------------------------------------------------------
# User-matching TTLs (seconds)
#
# How long a cached match result is considered "fresh" before re-checking.
# Manual matches never expire and can only be removed via the admin UI.
# ---------------------------------------------------------------------------

MATCH_TTL_EMAIL = 30 * 24 * 3600  # 30 days for email-confirmed matches
MATCH_TTL_NAME = 14 * 24 * 3600  # 14 days for name-based matches
MATCH_TTL_NONE = 90 * 24 * 3600  # 90 days for no-match (team_join handles re-checks)
USER_DIR_REFRESH_TTL = 24 * 3600  # 24 hours per workspace directory refresh
USER_MATCHING_PAGE_SIZE = 40  # max unmatched users shown in the modal

# Refresh button cooldown (seconds) when content hash unchanged
REFRESH_COOLDOWN_SECONDS = 60


def _int_env(name: str, default: int) -> int:
    """Read *name* from the environment as an int, falling back to *default*.

    A malformed value logs a warning and returns the default instead of
    crashing module import (which would take down every Lambda cold-start).
    """
    raw = os.environ.get(name, "").strip()
    if not raw:
        return default
    try:
        return int(raw)
    except ValueError:
        _logger.warning("Invalid integer for env var %s: %r — using default %d", name, raw, default)
        return default


SOFT_DELETE_RETENTION_DAYS = _int_env("SOFT_DELETE_RETENTION_DAYS", 30)

# ---------------------------------------------------------------------------
# Federation
# ---------------------------------------------------------------------------

SYNCBOT_INSTANCE_ID = "SYNCBOT_INSTANCE_ID"
SYNCBOT_PUBLIC_URL = "SYNCBOT_PUBLIC_URL"
FEDERATION_ENABLED = os.environ.get("SYNCBOT_FEDERATION_ENABLED", "false").lower() == "true"

AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID"
AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY"
S3_IMAGE_BUCKET = os.environ.get("S3_IMAGE_BUCKET", "")
# Default URL is derived from the bucket name unless explicitly overridden.
S3_IMAGE_URL = os.environ.get("S3_IMAGE_URL", f"https://{S3_IMAGE_BUCKET}.s3.amazonaws.com/" if S3_IMAGE_BUCKET else "")
S3_ENABLED = bool(S3_IMAGE_BUCKET)
S3_VIDEO_ENABLED = os.environ.get("S3_VIDEO_ENABLED", "false").lower() == "true"


# ---------------------------------------------------------------------------
# Startup configuration validation
#
# Validates that all required environment variables are set before the app
# handles any requests. Fails fast in production; warns in local dev.
# ---------------------------------------------------------------------------

# Required in all environments
_REQUIRED_ALWAYS = [
    DATABASE_HOST,
    ADMIN_DATABASE_USER,
    ADMIN_DATABASE_PASSWORD,
    ADMIN_DATABASE_SCHEMA,
]

# Required only in production (Lambda)
_REQUIRED_PRODUCTION = [
    SLACK_SIGNING_SECRET,
    SLACK_CLIENT_ID,
    SLACK_CLIENT_SECRET,
    SLACK_SCOPES,
    SLACK_STATE_S3_BUCKET_NAME,
    SLACK_INSTALLATION_S3_BUCKET_NAME,
    PASSWORD_ENCRYPT_KEY,
]


def _encryption_active() -> bool:
    """Return True if bot-token encryption is configured with a real key.

    The literal "123" is treated as a non-key — presumably a known sample
    value from the .env template; TODO confirm against .env.example.
    """
    key = os.environ.get(PASSWORD_ENCRYPT_KEY, "")
    return bool(key) and key != "123"


def validate_config() -> None:
    """Check that required environment variables are present.

    In production this raises immediately so the Lambda fails on cold-start
    rather than silently misbehaving. In local development it only warns.

    Raises:
        OSError: if any required variable is missing outside local-dev mode.
    """
    required = list(_REQUIRED_ALWAYS)
    if not LOCAL_DEVELOPMENT:
        required.extend(_REQUIRED_PRODUCTION)

    missing = [var for var in required if not os.environ.get(var)]

    if missing:
        msg = "Missing required environment variable(s): " + ", ".join(missing)
        if LOCAL_DEVELOPMENT:
            _logger.warning(msg + " (continuing in local-dev mode)")
        else:
            _logger.critical(msg)
            raise OSError(msg)

    # Not fatal: encryption being off is loudly logged but the app still boots,
    # so an operator can recover without a deploy loop.
    if not LOCAL_DEVELOPMENT and not _encryption_active():
        _logger.critical(
            "Bot-token encryption is DISABLED in production. "
            "Set PASSWORD_ENCRYPT_KEY to a strong passphrase to encrypt tokens at rest."
        )
"""Database engine, session management, and the :class:`DbManager` CRUD helper.

Key design decisions:

* **Connection pooling** — Uses :class:`~sqlalchemy.pool.QueuePool` with
  ``pool_pre_ping=True`` so that warm Lambda containers reuse connections
  while stale ones are transparently replaced.
* **Automatic retry** — The :func:`_with_retry` decorator retries any
  :class:`~sqlalchemy.exc.OperationalError` up to ``_MAX_RETRIES`` times,
  disposing the engine only after all retries are exhausted.
"""

import functools
import logging
import os
import ssl
from dataclasses import dataclass
from pathlib import Path
from typing import TypeVar
from urllib.parse import quote_plus

from sqlalchemy import and_, create_engine, func, pool, text
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker

import constants
from db.schemas import BaseClass

_logger = logging.getLogger(__name__)


@dataclass
class DatabaseField:
    """Name/value pair describing a single column for callers building updates."""

    name: str
    value: object = None


# Module-level singletons reused across warm Lambda invocations.
GLOBAL_ENGINE = None
GLOBAL_SESSION = None
GLOBAL_SCHEMA = None

# Maximum number of times to retry a DB operation on a transient connection error
_MAX_RETRIES = 2


def _build_base_url(include_schema: bool = False) -> tuple[str, dict]:
    """Build MySQL URL and connect_args for get_engine (no schema or with schema).

    Credentials are URL-quoted so passwords containing ``@``/``:`` survive.
    Outside local dev a TLS context is attached (Amazon's CA bundle when
    present, otherwise the system default trust store).
    """
    host = os.environ[constants.DATABASE_HOST]
    user = quote_plus(os.environ[constants.ADMIN_DATABASE_USER])
    passwd = quote_plus(os.environ[constants.ADMIN_DATABASE_PASSWORD])
    schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot")
    path = f"/{schema}" if include_schema else ""
    db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306{path}?charset=utf8mb4"
    connect_args: dict = {}
    if not constants.LOCAL_DEVELOPMENT:
        ca_path = "/etc/pki/tls/certs/ca-bundle.crt"
        try:
            ssl_ctx = ssl.create_default_context(cafile=ca_path)
        except (OSError, ssl.SSLError):
            ssl_ctx = ssl.create_default_context()
        connect_args["ssl"] = ssl_ctx
    return db_url, connect_args


def drop_and_init_db() -> None:
    """Drop the database and reinitialize from db/init.sql. All data is lost.

    Only run when DANGER_DROP_AND_INIT_DB is set to true. Caller must check.
    Resets GLOBAL_ENGINE and GLOBAL_SESSION so the next get_engine() uses a fresh DB.
    """
    global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA

    _logger.warning(
        "DANGER_DROP_AND_INIT_DB is set: dropping database and reinitializing from init.sql. All data will be lost."
    )

    schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot")
    url_no_db, connect_args = _build_base_url(include_schema=False)
    engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True)

    with engine_no_db.connect() as conn:
        conn.execute(text(f"DROP DATABASE IF EXISTS `{schema}`"))
        conn.execute(text(f"CREATE DATABASE `{schema}` CHARACTER SET utf8mb4"))
        conn.commit()

    engine_no_db.dispose()

    url_with_db, connect_args = _build_base_url(include_schema=True)
    engine_with_db = create_engine(url_with_db, connect_args=connect_args, pool_pre_ping=True)

    # init.sql lives at the repository root (two levels above this package).
    init_path = Path(__file__).resolve().parent.parent.parent / "db" / "init.sql"
    if not init_path.exists():
        _logger.error("drop_and_init_db: init.sql not found at %s", init_path)
        engine_with_db.dispose()
        return

    sql = init_path.read_text()
    # Strip line comments and split into statements.
    # NOTE(review): this naive "--" strip would also truncate a string literal
    # containing "--"; acceptable only while init.sql is comment-safe DDL.
    lines = []
    for line in sql.splitlines():
        if "--" in line:
            line = line[: line.index("--")].strip()
        else:
            line = line.strip()
        if line:
            lines.append(line)
    combined = " ".join(lines)
    statements = [s.strip() for s in combined.split(";") if s.strip()]

    with engine_with_db.connect() as conn:
        for stmt in statements:
            if stmt:
                conn.execute(text(stmt))
        conn.commit()

    engine_with_db.dispose()

    # Force the next get_engine()/get_session() to rebuild against the new DB.
    GLOBAL_ENGINE = None
    GLOBAL_SESSION = None
    GLOBAL_SCHEMA = None
    _logger.info("drop_and_init_db: database %s dropped and reinitialized from init.sql", schema)


def get_engine(echo: bool = False, schema: str | None = None):
    """Return the global SQLAlchemy engine, creating it on first call.

    Uses QueuePool with pool_pre_ping so that stale connections (common
    in Lambda warm containers) are detected and replaced transparently.
    Passing a different *schema* than the cached one rebuilds the engine.
    """
    global GLOBAL_ENGINE, GLOBAL_SCHEMA

    target_schema = schema or os.environ[constants.ADMIN_DATABASE_SCHEMA]

    if target_schema == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None:
        return GLOBAL_ENGINE

    host = os.environ[constants.DATABASE_HOST]
    user = quote_plus(os.environ[constants.ADMIN_DATABASE_USER])
    passwd = quote_plus(os.environ[constants.ADMIN_DATABASE_PASSWORD])

    db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306/{target_schema}?charset=utf8mb4"

    connect_args: dict = {}
    if not constants.LOCAL_DEVELOPMENT:
        ca_path = "/etc/pki/tls/certs/ca-bundle.crt"
        try:
            ssl_ctx = ssl.create_default_context(cafile=ca_path)
        except (OSError, ssl.SSLError):
            ssl_ctx = ssl.create_default_context()
        connect_args["ssl"] = ssl_ctx

    # Tiny pool: each Lambda container serves one request at a time.
    GLOBAL_ENGINE = create_engine(
        db_url,
        echo=echo,
        poolclass=pool.QueuePool,
        pool_size=1,
        max_overflow=1,
        pool_recycle=3600,
        pool_pre_ping=True,
        connect_args=connect_args,
    )
    GLOBAL_SCHEMA = target_schema
    return GLOBAL_ENGINE


def get_session(echo: bool = False, schema: str | None = None):
    """Return a new session bound to the global engine.

    GLOBAL_SESSION is honored if something has set it (e.g. tests injecting
    a shared session); it is never assigned by this module itself.
    """
    if GLOBAL_SESSION:
        return GLOBAL_SESSION
    engine = get_engine(echo=echo, schema=schema)
    return sessionmaker(bind=engine)()


def close_session(session) -> None:
    """Close the session (return the connection to the pool)."""
    if session is not None:
        session.close()


T = TypeVar("T")


def _with_retry(fn):
    """Decorator that retries a DB operation on transient OperationalErrors.

    Relies on ``pool_pre_ping=True`` to replace stale connections between
    retries. Only disposes the engine after all retries are exhausted to
    avoid disrupting other in-flight queries sharing the pool.
    """

    @functools.wraps(fn)  # preserve __name__/__doc__ for logging and debugging
    def wrapper(*args, **kwargs):
        last_exc = None
        for attempt in range(_MAX_RETRIES + 1):
            try:
                return fn(*args, **kwargs)
            except OperationalError as exc:
                last_exc = exc
                if attempt < _MAX_RETRIES:
                    _logger.warning(f"DB operation {fn.__name__} failed (attempt {attempt + 1}), retrying: {exc}")
        else:
            _logger.error(f"DB operation {fn.__name__} failed after {_MAX_RETRIES + 1} attempts")
            global GLOBAL_ENGINE
            if GLOBAL_ENGINE is not None:
                GLOBAL_ENGINE.dispose()
            raise last_exc

    return wrapper


class DbManager:
    """Static CRUD helpers over short-lived sessions.

    Every method opens a session, performs one operation, expunges any
    returned ORM objects (so they stay usable after the session closes),
    and returns the connection to the pool. All methods retry on
    transient connection errors via :func:`_with_retry`.
    """

    @staticmethod
    @_with_retry
    def get_record(cls: type[T], id, schema=None) -> T:
        """Return the first record whose primary key column equals *id*, or None."""
        session = get_session(schema=schema)
        try:
            x = session.query(cls).filter(cls.get_id() == id).first()
            if x:
                session.expunge(x)
            return x
        finally:
            session.rollback()
            close_session(session)

    @staticmethod
    @_with_retry
    def find_records(cls: type[T], filters, schema=None) -> list[T]:
        """Return all records matching the AND of *filters* (detached)."""
        session = get_session(schema=schema)
        try:
            records = session.query(cls).filter(and_(*filters)).all()
            for r in records:
                session.expunge(r)
            return records
        finally:
            session.rollback()
            close_session(session)

    @staticmethod
    @_with_retry
    def count_records(cls: type[T], filters, schema=None) -> int:
        """Return COUNT(id) of records matching the AND of *filters*."""
        session = get_session(schema=schema)
        try:
            return session.query(func.count(cls.id)).filter(and_(*filters)).scalar() or 0
        finally:
            session.rollback()
            close_session(session)

    @staticmethod
    @_with_retry
    def find_join_records2(left_cls, right_cls, filters, schema=None) -> list[tuple]:
        """Inner-join two tables (relationship-derived ON clause) and filter."""
        session = get_session(schema=schema)
        try:
            records = session.query(left_cls, right_cls).join(right_cls).filter(and_(*filters)).all()
            session.expunge_all()
            return records
        finally:
            session.rollback()
            close_session(session)

    @staticmethod
    @_with_retry
    def find_join_records3(left_cls, right_cls1, right_cls2, filters, schema=None, left_join=False) -> list[tuple]:
        """Join three tables (outer joins when *left_join* is True) and filter."""
        session = get_session(schema=schema)
        try:
            records = (
                session.query(left_cls, right_cls1, right_cls2)
                .select_from(left_cls)
                .join(right_cls1, isouter=left_join)
                .join(right_cls2, isouter=left_join)
                .filter(and_(*filters))
                .all()
            )
            session.expunge_all()
            return records
        finally:
            session.rollback()
            close_session(session)

    @staticmethod
    @_with_retry
    def update_record(cls, id, fields, schema=None) -> None:
        """Apply *fields* to the record whose primary key column equals *id*."""
        session = get_session(schema=schema)
        try:
            session.query(cls).filter(cls.get_id() == id).update(fields, synchronize_session="fetch")
            session.flush()
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)

    @staticmethod
    @_with_retry
    def update_records(cls, filters, fields, schema=None) -> None:
        """Apply *fields* to every record matching the AND of *filters*."""
        session = get_session(schema=schema)
        try:
            session.query(cls).filter(and_(*filters)).update(fields, synchronize_session="fetch")
            session.flush()
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)

    @staticmethod
    @_with_retry
    def create_record(record: BaseClass, schema=None) -> BaseClass:
        """Insert *record* and return it (flushed first so the PK is populated)."""
        session = get_session(schema=schema)
        try:
            session.add(record)
            session.flush()
            # Expunge after flush: the autoincrement id is assigned, and the
            # object stays usable after the session closes.
            session.expunge(record)
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)
        return record

    @staticmethod
    @_with_retry
    def create_records(records: list[BaseClass], schema=None) -> None:
        """Insert all *records* in a single transaction."""
        session = get_session(schema=schema)
        try:
            session.add_all(records)
            session.flush()
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)

    @staticmethod
    @_with_retry
    def delete_record(cls, id, schema=None) -> None:
        """Hard-delete the record whose primary key column equals *id*."""
        session = get_session(schema=schema)
        try:
            session.query(cls).filter(cls.get_id() == id).delete()
            session.flush()
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)

    @staticmethod
    @_with_retry
    def delete_records(cls, filters, schema=None) -> None:
        """Hard-delete every record matching the AND of *filters*."""
        session = get_session(schema=schema)
        try:
            session.query(cls).filter(and_(*filters)).delete()
            session.flush()
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            close_session(session)
+""" + +from typing import Any + +import sqlalchemy +from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import relationship +from sqlalchemy.types import DECIMAL + +BaseClass = declarative_base(mapper=sqlalchemy.orm.mapper) + + +class GetDBClass: + """Mixin providing helper accessors for ORM model classes.""" + + _column_keys: frozenset[str] | None = None + + @classmethod + def _get_column_keys(cls) -> frozenset[str]: + if cls._column_keys is None: + cls._column_keys = frozenset(c.key for c in cls.__table__.columns) + return cls._column_keys + + def get_id(self) -> Any: + return self.id + + def get(self, attr: str) -> Any: + if attr in self._get_column_keys(): + return getattr(self, attr) + return None + + def to_json(self) -> dict[str, Any]: + return {key: getattr(self, key) for key in self._get_column_keys()} + + def __repr__(self) -> str: + return str(self.to_json()) + + +class Workspace(BaseClass, GetDBClass): + __tablename__ = "workspaces" + id = Column(Integer, primary_key=True) + team_id = Column(String(100), unique=True) + workspace_name = Column(String(100)) + bot_token = Column(String(256)) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return Workspace.team_id + + +class WorkspaceGroup(BaseClass, GetDBClass): + """A named group of workspaces that can sync channels together.""" + + __tablename__ = "workspace_groups" + id = Column(Integer, primary_key=True) + name = Column(String(100), nullable=False) + invite_code = Column(String(20), unique=True, nullable=False) + status = Column(String(20), nullable=False, default="active") + created_at = Column(DateTime, nullable=False) + created_by_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=False) + + def get_id(): + return WorkspaceGroup.id + + +class WorkspaceGroupMember(BaseClass, GetDBClass): + """Membership record linking a workspace (or federated 
workspace) to a group.""" + + __tablename__ = "workspace_group_members" + id = Column(Integer, primary_key=True) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=False) + workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + federated_workspace_id = Column(Integer, ForeignKey("federated_workspaces.id"), nullable=True) + status = Column(String(20), nullable=False, default="active") + role = Column(String(20), nullable=False, default="member") + joined_at = Column(DateTime, nullable=True) + deleted_at = Column(DateTime, nullable=True, default=None) + dm_messages = Column(Text, nullable=True) + + group = relationship("WorkspaceGroup", backref="members") + workspace = relationship("Workspace", backref="group_memberships") + + def get_id(): + return WorkspaceGroupMember.id + + +class Sync(BaseClass, GetDBClass): + __tablename__ = "syncs" + id = Column(Integer, primary_key=True) + title = Column(String(100)) + description = Column(String(100)) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=True) + sync_mode = Column(String(20), nullable=False, default="group") + target_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + publisher_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + + def get_id(): + return Sync.id + + +class SyncChannel(BaseClass, GetDBClass): + __tablename__ = "sync_channels" + id = Column(Integer, primary_key=True) + sync_id = Column(Integer, ForeignKey("syncs.id")) + workspace_id = Column(Integer, ForeignKey("workspaces.id")) + workspace = relationship("Workspace", backref="sync_channels") + channel_id = Column(String(100)) + status = Column(String(20), nullable=False, default="active") + created_at = Column(DateTime, nullable=False) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return SyncChannel.channel_id + + +class PostMeta(BaseClass, GetDBClass): + __tablename__ = "post_meta" + id = 
Column(Integer, primary_key=True) + post_id = Column(String(100)) + sync_channel_id = Column(Integer, ForeignKey("sync_channels.id")) + ts = Column(DECIMAL(16, 6)) + + def get_id(): + return PostMeta.post_id + + +class UserDirectory(BaseClass, GetDBClass): + """Cached user profile from a Slack workspace, used for name matching.""" + + __tablename__ = "user_directory" + id = Column(Integer, primary_key=True) + workspace_id = Column(Integer, ForeignKey("workspaces.id")) + slack_user_id = Column(String(100), nullable=False) + email = Column(String(320), nullable=True) + real_name = Column(String(200), nullable=True) + display_name = Column(String(200), nullable=True) + normalized_name = Column(String(200), nullable=True) + updated_at = Column(DateTime, nullable=False) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return UserDirectory.id + + +class UserMapping(BaseClass, GetDBClass): + """Cross-workspace user match result (or explicit no-match).""" + + __tablename__ = "user_mappings" + id = Column(Integer, primary_key=True) + source_workspace_id = Column(Integer, ForeignKey("workspaces.id")) + source_user_id = Column(String(100), nullable=False) + target_workspace_id = Column(Integer, ForeignKey("workspaces.id")) + target_user_id = Column(String(100), nullable=True) + match_method = Column(String(20), nullable=False, default="none") + source_display_name = Column(String(200), nullable=True) + matched_at = Column(DateTime, nullable=False) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=True) + + def get_id(): + return UserMapping.id + + +class InstanceKey(BaseClass, GetDBClass): + """This instance's Ed25519 keypair, auto-generated on first boot. + + The private key is stored Fernet-encrypted using PASSWORD_ENCRYPT_KEY. + The public key is shared with federated workspaces during connection setup. 
+ """ + + __tablename__ = "instance_keys" + id = Column(Integer, primary_key=True) + public_key = Column(Text, nullable=False) + private_key_encrypted = Column(Text, nullable=False) + created_at = Column(DateTime, nullable=False) + + def get_id(): + return InstanceKey.id + + +class FederatedWorkspace(BaseClass, GetDBClass): + """A remote SyncBot instance that this instance can communicate with. + + Each federated workspace has a unique ``instance_id`` (UUID), a + ``webhook_url`` for pushing events, and a ``public_key`` (Ed25519 PEM) + used to verify inbound request signatures. + ``primary_team_id`` and ``primary_workspace_name`` are optional and set + when the connection is from a workspace that migrated to the remote instance. + """ + + __tablename__ = "federated_workspaces" + id = Column(Integer, primary_key=True) + instance_id = Column(String(64), unique=True, nullable=False) + webhook_url = Column(String(500), nullable=False) + public_key = Column(Text, nullable=False) + status = Column(String(20), nullable=False, default="active") + name = Column(String(200), nullable=True) + primary_team_id = Column(String(100), nullable=True) + primary_workspace_name = Column(String(100), nullable=True) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=True) + + def get_id(): + return FederatedWorkspace.id diff --git a/syncbot/federation/__init__.py b/syncbot/federation/__init__.py new file mode 100644 index 0000000..967fb35 --- /dev/null +++ b/syncbot/federation/__init__.py @@ -0,0 +1,58 @@ +"""Cross-instance federation for SyncBot. + +Re-exports public API from :mod:`federation.core` and +:mod:`federation.api` so callers can use ``import federation`` +and access all federation functions directly. 
+""" + +from federation.core import ( + FEDERATION_USER_AGENT, + build_delete_payload, + build_edit_payload, + build_message_payload, + build_reaction_payload, + federation_sign, + federation_verify, + generate_federation_code, + get_instance_id, + get_or_create_federated_workspace, + get_or_create_instance_keypair, + get_public_url, + initiate_federation_connect, + parse_federation_code, + ping_federated_workspace, + push_delete, + push_edit, + push_message, + push_reaction, + push_users, + sign_body, + validate_webhook_url, + verify_body, +) + +__all__ = [ + "FEDERATION_USER_AGENT", + "build_delete_payload", + "build_edit_payload", + "build_message_payload", + "build_reaction_payload", + "federation_sign", + "federation_verify", + "generate_federation_code", + "get_instance_id", + "get_or_create_federated_workspace", + "get_or_create_instance_keypair", + "get_public_url", + "initiate_federation_connect", + "parse_federation_code", + "ping_federated_workspace", + "push_delete", + "push_edit", + "push_message", + "push_reaction", + "push_users", + "sign_body", + "validate_webhook_url", + "verify_body", +] diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py new file mode 100644 index 0000000..cb43fba --- /dev/null +++ b/syncbot/federation/api.py @@ -0,0 +1,653 @@ +"""Federation API request handlers. + +These handlers process incoming HTTP requests from remote SyncBot instances. +They are called by the federation HTTP server (local dev) or the Lambda +handler (production) and return ``(status_code, response_dict)`` tuples. + +All federation endpoints require the ``SyncBot-Federation`` User-Agent; +requests without it receive an opaque 404, making the endpoints invisible +to scanners. 
"""Federation API request handlers.

These handlers process incoming HTTP requests from remote SyncBot instances.
They are called by the federation HTTP server (local dev) or the Lambda
handler (production) and return ``(status_code, response_dict)`` tuples.

All federation endpoints require the ``SyncBot-Federation`` User-Agent;
requests without it receive an opaque 404, making the endpoints invisible
to scanners.

Endpoints:

* ``POST /api/federation/pair`` -- Accept an incoming connection request
* ``POST /api/federation/message`` -- Receive a forwarded message
* ``POST /api/federation/message/edit`` -- Receive a message edit
* ``POST /api/federation/message/delete`` -- Receive a message delete
* ``POST /api/federation/message/react`` -- Receive a reaction
* ``POST /api/federation/users`` -- Exchange user directory
* ``GET /api/federation/ping`` -- Health check
"""

import json
import logging
import re
from datetime import UTC, datetime

from slack_sdk.web import WebClient

import constants
import helpers
from db import DbManager, schemas
from federation import core as federation

_logger = logging.getLogger(__name__)

# Opaque response returned for anything we refuse to acknowledge exists.
_NOT_FOUND = (404, {"message": "Not Found"})

# Pairing codes look like "FED-" followed by eight hex digits.
_PAIRING_CODE_RE = re.compile(r"^FED-[0-9A-Fa-f]{8}$")

# Upper bounds on inbound string fields (mirrors DB column sizes / sanity caps).
_FIELD_MAX_LENGTHS = {
    "channel_id": 20,
    "text": 40_000,
    "post_id": 100,
    "reaction": 100,
    "instance_id": 64,
    "webhook_url": 500,
    "code": 20,
    "action": 10,
}


def _find_post_records(post_id: str, sync_channel_id: int) -> list[schemas.PostMeta]:
    """Look up PostMeta records for a given post_id + sync channel."""
    # NOTE(review): a str post_id is encoded and truncated to 100 *bytes*
    # (the column length) and the bytes value is compared against a VARCHAR
    # column; a multi-byte id could be split mid-character — confirm intended.
    if isinstance(post_id, bytes):
        lookup_id = post_id
    else:
        lookup_id = post_id.encode()[:100]
    return DbManager.find_records(
        schemas.PostMeta,
        [
            schemas.PostMeta.post_id == lookup_id,
            schemas.PostMeta.sync_channel_id == sync_channel_id,
        ],
    )


# ---------------------------------------------------------------------------
# Input validation helper
# ---------------------------------------------------------------------------


def _validate_fields(body: dict, required: list[str], extras: list[str] | None = None) -> str | None:
    """Check required fields are present, non-empty, and within length limits.

    Returns an error string on failure, or *None* if valid.
    """
    # Pass 1: every required field must exist and be non-blank.
    for name in required:
        value = body.get(name)
        if value is None or (isinstance(value, str) and not value.strip()):
            return f"missing_{name}"

    # Pass 2: required + optional extras must respect the per-field caps.
    for name in (*required, *(extras or [])):
        value = body.get(name)
        limit = _FIELD_MAX_LENGTHS.get(name)
        if limit and isinstance(value, str) and len(value) > limit:
            return f"{name}_too_long"

    return None


# ---------------------------------------------------------------------------
# Authentication helpers
# ---------------------------------------------------------------------------


def _has_federation_user_agent(headers: dict) -> bool:
    """Return True when the request identifies itself as a SyncBot peer."""
    agent = headers.get("User-Agent", "") or headers.get("user-agent", "")
    return "SyncBot-Federation" in agent


def _verify_federated_request(body_str: str, headers: dict) -> schemas.FederatedWorkspace | None:
    """Verify the Ed25519 signature on an incoming federation request.

    Returns the :class:`FederatedWorkspace` record if valid, or *None*.
    """
    signature = headers.get("X-Federation-Signature", "")
    timestamp = headers.get("X-Federation-Timestamp", "")
    sender_id = headers.get("X-Federation-Instance", "")

    # All three headers are mandatory for a signed request.
    if not (signature and timestamp and sender_id):
        return None

    candidates = DbManager.find_records(
        schemas.FederatedWorkspace,
        [schemas.FederatedWorkspace.instance_id == sender_id],
    )
    if not candidates:
        return None
    remote = candidates[0]
    if remote.status != "active":
        return None

    if federation.federation_verify(body_str, signature, timestamp, remote.public_key):
        return remote

    _logger.warning(
        "federation_auth_failed — remote workspace may have regenerated its keypair; reconnection required",
        extra={"instance_id": sender_id},
    )
    return None
+ """Return *True* if *fed_ws* is authorised to interact with *sync_channel*. + + The federated workspace must be linked to the sync's group via a + WorkspaceGroupMember whose ``federated_workspace_id`` matches. + """ + sync = DbManager.get_record(schemas.Sync, id=sync_channel.sync_id) + if not sync or not sync.group_id: + return False + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == sync.group_id, + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + return bool(fed_members) + + +def _resolve_channel_for_federated( + channel_id: str, + fed_ws: schemas.FederatedWorkspace, + *, + require_active: bool = False, +) -> tuple[schemas.SyncChannel, schemas.Workspace] | None: + """Look up a sync channel, verify federated access, and return the workspace. + + Returns ``(sync_channel, workspace)`` or *None* if any check fails. 
+ """ + filters = [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ] + if require_active: + filters.append(schemas.SyncChannel.status == "active") + + records = DbManager.find_records(schemas.SyncChannel, filters) + if not records: + return None + + sc = records[0] + if not _federated_has_channel_access(fed_ws, sc): + return None + + workspace = helpers.get_workspace_by_id(sc.workspace_id) + if not workspace or not workspace.bot_token: + return None + + return sc, workspace + + +def _get_local_workspace_ids(fed_ws: schemas.FederatedWorkspace) -> set[int]: + """Return local workspace IDs that participate in groups shared with *fed_ws*.""" + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + ws_ids: set[int] = set() + for fm in fed_members: + group_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == fm.group_id, + schemas.WorkspaceGroupMember.workspace_id.isnot(None), + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in group_members: + if m.workspace_id: + ws_ids.add(m.workspace_id) + return ws_ids + + +# --------------------------------------------------------------------------- +# POST /api/federation/pair +# --------------------------------------------------------------------------- + + +def handle_pair(body: dict, body_str: str, headers: dict) -> tuple[int, dict]: + """Accept an incoming connection request from a remote instance. + + The remote instance sends its ``code``, ``webhook_url``, ``instance_id``, + and ``public_key``. The request must be signed with the sender's private + key so we can verify it matches the included public key. 
+ """ + err = _validate_fields(body, ["code", "webhook_url", "instance_id", "public_key"]) + if err: + return 400, {"error": err} + + code = body["code"] + remote_url = body["webhook_url"] + remote_instance_id = body["instance_id"] + remote_public_key = body["public_key"] + + if not _PAIRING_CODE_RE.match(code): + return 400, {"error": "invalid_code_format"} + + if not federation.validate_webhook_url(remote_url): + return 400, {"error": "invalid_webhook_url"} + + sig = headers.get("X-Federation-Signature", "") + ts = headers.get("X-Federation-Timestamp", "") + if not sig or not ts: + return 401, {"error": "missing_signature"} + + if not federation.federation_verify(body_str, sig, ts, remote_public_key): + return 401, {"error": "invalid_signature"} + + groups = DbManager.find_records( + schemas.WorkspaceGroup, + [schemas.WorkspaceGroup.invite_code == code, schemas.WorkspaceGroup.status == "active"], + ) + if not groups: + return _NOT_FOUND + group = groups[0] + + existing_fed = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in existing_fed: + if m.federated_workspace_id: + fed_ws_check = DbManager.get_record(schemas.FederatedWorkspace, id=m.federated_workspace_id) + if fed_ws_check and fed_ws_check.instance_id == remote_instance_id: + return 409, {"error": "already_connected"} + + fed_ws_name = f"Connection {remote_instance_id[:8]}" + _team_id = body.get("team_id") + primary_team_id = _team_id.strip() if isinstance(_team_id, str) and _team_id.strip() else None + primary_workspace_name = body.get("workspace_name") if isinstance(body.get("workspace_name"), str) else None + + fed_ws = federation.get_or_create_federated_workspace( + instance_id=remote_instance_id, + webhook_url=remote_url, + public_key=remote_public_key, + name=fed_ws_name, + primary_team_id=primary_team_id, + 
primary_workspace_name=primary_workspace_name, + ) + + now = datetime.now(UTC) + member = schemas.WorkspaceGroupMember( + group_id=group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(member) + + # Instance A detection: if the connecting side sent team_id, soft-delete the matching local workspace + if primary_team_id: + local_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.team_id == primary_team_id], + ) + if local_workspaces: + local_ws = local_workspaces[0] + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == local_ws.id], + {schemas.Workspace.deleted_at: now}, + ) + _logger.info( + "federation_local_workspace_soft_deleted", + extra={"team_id": primary_team_id, "workspace_id": local_ws.id}, + ) + + _, our_public_key = federation.get_or_create_instance_keypair() + + _logger.info( + "federation_connection_accepted", + extra={ + "group_id": group.id, + "remote_instance": remote_instance_id, + }, + ) + + return 200, { + "ok": True, + "instance_id": federation.get_instance_id(), + "public_key": our_public_key, + "group_id": group.id, + } + + +# --------------------------------------------------------------------------- +# POST /api/federation/message +# --------------------------------------------------------------------------- + + +def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and post a forwarded message from a federated workspace.""" + err = _validate_fields(body, ["channel_id"], extras=["text", "post_id"]) + if err: + return 400, {"error": err} + + channel_id = body["channel_id"] + text = body.get("text", "") + user = body.get("user", {}) + post_id = body.get("post_id", "") + thread_post_id = body.get("thread_post_id") + images = body.get("images", [])[:10] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws, require_active=True) + if not resolved: + return _NOT_FOUND 
+ sc, workspace = resolved + + user_name = user.get("display_name", "Remote User") + user_avatar = user.get("avatar_url") + workspace_name = user.get("workspace_name", "Remote") + + try: + thread_ts = None + if thread_post_id: + post_records = DbManager.find_records( + schemas.PostMeta, + [ + schemas.PostMeta.post_id == thread_post_id, + schemas.PostMeta.sync_channel_id == sc.id, + ], + ) + if post_records: + thread_ts = str(post_records[0].ts) + + photo_blocks = [] + if images: + for img in images: + photo_blocks.append( + { + "type": "image", + "image_url": img.get("url", ""), + "alt_text": img.get("alt_text", "Shared image"), + } + ) + + res = helpers.post_message( + bot_token=helpers.decrypt_bot_token(workspace.bot_token), + channel_id=channel_id, + msg_text=text, + user_name=user_name, + user_profile_url=user_avatar, + workspace_name=workspace_name, + blocks=photo_blocks if photo_blocks else None, + thread_ts=thread_ts, + ) + + ts = helpers.safe_get(res, "ts") + + if post_id and ts: + pm = schemas.PostMeta( + post_id=post_id if isinstance(post_id, bytes) else post_id.encode()[:100], + sync_channel_id=sc.id, + ts=float(ts), + ) + DbManager.create_record(pm) + + _logger.info( + "federation_message_received", + extra={"channel_id": channel_id, "remote": fed_ws.instance_id}, + ) + + return 200, {"ok": True, "ts": ts} + + except Exception: + _logger.exception("federation_message_error", extra={"channel_id": channel_id}) + return 500, {"error": "internal_error"} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/edit +# --------------------------------------------------------------------------- + + +def handle_message_edit(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and apply a message edit from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id"], extras=["text"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + 
text = body.get("text", "") + channel_id = body["channel_id"] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sc, workspace = resolved + + post_records = _find_post_records(post_id, sc.id) + + updated = 0 + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + for pm in post_records: + try: + ws_client.chat_update(channel=channel_id, ts=str(pm.ts), text=text) + updated += 1 + except Exception: + _logger.warning("federation_edit_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + + return 200, {"ok": True, "updated": updated} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/delete +# --------------------------------------------------------------------------- + + +def handle_message_delete(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and apply a message deletion from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + channel_id = body["channel_id"] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sc, workspace = resolved + + post_records = _find_post_records(post_id, sc.id) + + deleted = 0 + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + for pm in post_records: + try: + ws_client.chat_delete(channel=channel_id, ts=str(pm.ts)) + deleted += 1 + except Exception: + _logger.warning("federation_delete_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + + return 200, {"ok": True, "deleted": deleted} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/react +# --------------------------------------------------------------------------- + + +def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> 
tuple[int, dict]: + """Receive and apply a reaction add/remove from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id", "reaction"], extras=["action"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + channel_id = body["channel_id"] + reaction = body["reaction"] + action = body.get("action", "add") + + resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sc, workspace = resolved + + post_records = _find_post_records(post_id, sc.id) + + applied = 0 + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + for pm in post_records: + try: + if action == "add": + ws_client.reactions_add(channel=channel_id, timestamp=str(pm.ts), name=reaction) + else: + ws_client.reactions_remove(channel=channel_id, timestamp=str(pm.ts), name=reaction) + applied += 1 + except Exception: + _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + + return 200, {"ok": True, "applied": applied} + + +# --------------------------------------------------------------------------- +# POST /api/federation/users +# --------------------------------------------------------------------------- + + +def handle_users(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Exchange user directory with a federated workspace. + + Only returns users from workspaces that share groups with this federated workspace. 
+ """ + remote_users = body.get("users", [])[:5000] + workspace_id = body.get("workspace_id") + + if remote_users and workspace_id: + now = datetime.now(UTC) + for u in remote_users: + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.slack_user_id == u.get("user_id", ""), + ], + ) + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: u.get("email"), + schemas.UserDirectory.real_name: u.get("real_name"), + schemas.UserDirectory.display_name: u.get("display_name"), + schemas.UserDirectory.updated_at: now, + }, + ) + else: + record = schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=u.get("user_id", ""), + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + updated_at=now, + ) + DbManager.create_record(record) + + _logger.info( + "federation_users_received", + extra={"remote": fed_ws.instance_id, "count": len(remote_users)}, + ) + + allowed_ws_ids = _get_local_workspace_ids(fed_ws) + + local_users = [] + for ws_id in allowed_ws_ids: + ws = helpers.get_workspace_by_id(ws_id) + if not ws or ws.deleted_at: + continue + users = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == ws_id, schemas.UserDirectory.deleted_at.is_(None)], + ) + for u in users: + local_users.append( + { + "user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + "workspace_id": ws_id, + } + ) + + return 200, {"ok": True, "users": local_users} + + +# --------------------------------------------------------------------------- +# GET /api/federation/ping +# --------------------------------------------------------------------------- + + +def handle_ping() -> tuple[int, dict]: + """Health check -- returns instance identity.""" + return 200, { + "ok": True, + 
"instance_id": federation.get_instance_id(), + "timestamp": datetime.now(UTC).isoformat(), + } + + +# --------------------------------------------------------------------------- +# Request dispatcher +# --------------------------------------------------------------------------- + + +def dispatch_federation_request(method: str, path: str, body_str: str, headers: dict) -> tuple[int, dict]: + """Route an incoming federation HTTP request to the appropriate handler. + + Returns ``(status_code, response_dict)``. + + Requests without the ``SyncBot-Federation`` User-Agent receive a plain + 404 identical to API Gateway's response for non-existent paths. + """ + if not _has_federation_user_agent(headers): + return _NOT_FOUND + + if path == "/api/federation/ping" and method == "GET": + return handle_ping() + + if not constants.FEDERATION_ENABLED: + return _NOT_FOUND + + if method != "POST": + return _NOT_FOUND + + try: + body = json.loads(body_str) if body_str else {} + except json.JSONDecodeError: + return 400, {"error": "invalid_json"} + + if path == "/api/federation/pair": + return handle_pair(body, body_str, headers) + + fed_ws = _verify_federated_request(body_str, headers) + if not fed_ws: + return _NOT_FOUND + + if path == "/api/federation/message": + return handle_message(body, fed_ws) + elif path == "/api/federation/message/edit": + return handle_message_edit(body, fed_ws) + elif path == "/api/federation/message/delete": + return handle_message_delete(body, fed_ws) + elif path == "/api/federation/message/react": + return handle_message_react(body, fed_ws) + elif path == "/api/federation/users": + return handle_users(body, fed_ws) + + return _NOT_FOUND diff --git a/syncbot/federation/core.py b/syncbot/federation/core.py new file mode 100644 index 0000000..4d4702d --- /dev/null +++ b/syncbot/federation/core.py @@ -0,0 +1,676 @@ +"""Cross-instance federation for SyncBot. + +Provides: + +* **Ed25519 signing and verification** of inter-instance HTTP requests. 
+* **Auto-generated keypair** created on first boot and stored in the DB. +* **HTTP client** for pushing events (messages, edits, deletes, reactions, + user-directory exchanges) to federated workspaces. +* **Connection code** generation and parsing (encodes webhook URL + code + + instance ID + public key). +* **Payload builders** for standardised federation message formats. +""" + +import base64 +import ipaddress +import json +import logging +import os +import secrets +import time +import uuid +from datetime import UTC, datetime +from urllib.parse import urlparse + +import requests +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, + load_pem_private_key, + load_pem_public_key, +) + +import constants +from db import DbManager, schemas + +_logger = logging.getLogger(__name__) + +FEDERATION_USER_AGENT = "SyncBot-Federation/1.0" + +# --------------------------------------------------------------------------- +# Instance identity +# --------------------------------------------------------------------------- + +_INSTANCE_ID: str | None = None + + +def get_instance_id() -> str: + """Return a persistent UUID identifying this SyncBot instance. + + Reads from ``SYNCBOT_INSTANCE_ID`` env var. If not set, generates one + and stores it in-memory for the lifetime of the process. 
+ """ + global _INSTANCE_ID + if _INSTANCE_ID: + return _INSTANCE_ID + _INSTANCE_ID = os.environ.get("SYNCBOT_INSTANCE_ID") or str(uuid.uuid4()) + return _INSTANCE_ID + + +def get_public_url() -> str: + """Return the public base URL of this instance (no trailing slash).""" + url = os.environ.get("SYNCBOT_PUBLIC_URL", "").rstrip("/") + if not url: + _logger.warning("SYNCBOT_PUBLIC_URL is not set — federation will not work") + return url + + +# --------------------------------------------------------------------------- +# Ed25519 keypair management +# --------------------------------------------------------------------------- + +_cached_private_key = None +_cached_public_pem: str | None = None + + +def get_or_create_instance_keypair(): + """Return this instance's Ed25519 (private_key, public_key_pem). + + Auto-generates and persists the keypair on first call. The private key + is Fernet-encrypted at rest in the ``instance_keys`` table. + """ + global _cached_private_key, _cached_public_pem + if _cached_private_key and _cached_public_pem: + return _cached_private_key, _cached_public_pem + + from helpers import decrypt_bot_token, encrypt_bot_token + + existing = DbManager.find_records(schemas.InstanceKey, []) + if existing: + private_pem = decrypt_bot_token(existing[0].private_key_encrypted) + private_key = load_pem_private_key(private_pem.encode(), password=None) + _cached_private_key = private_key + _cached_public_pem = existing[0].public_key + return private_key, existing[0].public_key + + private_key = Ed25519PrivateKey.generate() + public_pem = private_key.public_key().public_bytes( + Encoding.PEM, PublicFormat.SubjectPublicKeyInfo + ).decode() + private_pem = private_key.private_bytes( + Encoding.PEM, PrivateFormat.PKCS8, NoEncryption() + ).decode() + + record = schemas.InstanceKey( + public_key=public_pem, + private_key_encrypted=encrypt_bot_token(private_pem), + created_at=datetime.now(UTC), + ) + DbManager.create_record(record) + + _cached_private_key = 
private_key + _cached_public_pem = public_pem + _logger.info("instance_keypair_generated") + return private_key, public_pem + + +# --------------------------------------------------------------------------- +# Ed25519 signing / verification +# --------------------------------------------------------------------------- + +_TIMESTAMP_MAX_AGE = 300 # 5 minutes + + +def federation_sign(body: str) -> tuple[str, str]: + """Sign *body* with this instance's Ed25519 private key. + + Returns ``(signature_b64, timestamp_str)``. + """ + private_key, _ = get_or_create_instance_keypair() + ts = str(int(time.time())) + signing_str = f"{ts}:{body}".encode() + sig = private_key.sign(signing_str) + return base64.b64encode(sig).decode(), ts + + +def federation_verify(body: str, signature_b64: str, timestamp: str, public_key_pem: str) -> bool: + """Verify an incoming federation request using the sender's public key. + + Returns *True* if the signature is valid and the timestamp is fresh. + """ + try: + ts_int = int(timestamp) + except (TypeError, ValueError): + return False + + if abs(time.time() - ts_int) > _TIMESTAMP_MAX_AGE: + _logger.warning("federation_verify: timestamp too old/future", extra={"ts": timestamp}) + return False + + try: + public_key = load_pem_public_key(public_key_pem.encode()) + signing_str = f"{timestamp}:{body}".encode() + public_key.verify(base64.b64decode(signature_b64), signing_str) + return True + except (InvalidSignature, ValueError, TypeError): + return False + + +def sign_body(body: str) -> str: + """Sign *body* only (no timestamp). Used for migration export integrity.""" + private_key, _ = get_or_create_instance_keypair() + sig = private_key.sign(body.encode()) + return base64.b64encode(sig).decode() + + +def verify_body(body: str, signature_b64: str, public_key_pem: str) -> bool: + """Verify a signature over *body* (no timestamp). 
Used for migration import.""" + try: + public_key = load_pem_public_key(public_key_pem.encode()) + public_key.verify(base64.b64decode(signature_b64), body.encode()) + return True + except (InvalidSignature, ValueError, TypeError): + return False + + +# --------------------------------------------------------------------------- +# URL validation (SSRF protection) +# --------------------------------------------------------------------------- + +_PRIVATE_NETWORKS = [ + ipaddress.ip_network("10.0.0.0/8"), + ipaddress.ip_network("172.16.0.0/12"), + ipaddress.ip_network("192.168.0.0/16"), + ipaddress.ip_network("127.0.0.0/8"), + ipaddress.ip_network("169.254.0.0/16"), + ipaddress.ip_network("::1/128"), + ipaddress.ip_network("fc00::/7"), + ipaddress.ip_network("fe80::/10"), +] + + +def validate_webhook_url(url: str) -> bool: + """Return *True* if *url* is safe to use as a federation webhook target. + + Rejects private/loopback IPs (SSRF protection) and requires HTTPS in + production. HTTP is allowed only when ``LOCAL_DEVELOPMENT`` is true. 
+ """ + if not url: + return False + + try: + parsed = urlparse(url) + except Exception: + return False + + if constants.LOCAL_DEVELOPMENT: + if parsed.scheme not in ("http", "https"): + return False + else: + if parsed.scheme != "https": + return False + + hostname = parsed.hostname + if not hostname: + return False + + import socket + try: + addr_infos = socket.getaddrinfo(hostname, None) + for info in addr_infos: + addr = ipaddress.ip_address(info[4][0]) + for net in _PRIVATE_NETWORKS: + if addr in net: + _logger.warning( + "federation_ssrf_blocked", + extra={"url": url, "resolved_ip": str(addr)}, + ) + return False + except (socket.gaierror, ValueError): + return False + + return True + + +# --------------------------------------------------------------------------- +# Connection code generation / parsing +# --------------------------------------------------------------------------- + + +def generate_federation_code(workspace_id: int, label: str | None = None) -> tuple[str, str]: + """Generate a federation connection code and create a pending group record. + + Returns ``(encoded_payload, raw_code)`` where *encoded_payload* is the + base64-encoded JSON string the admin shares with the remote instance. + The payload includes this instance's public key for signature verification. 
+ """ + raw_code = "FED-" + secrets.token_hex(4).upper() + public_url = get_public_url() + instance_id = get_instance_id() + _, public_key_pem = get_or_create_instance_keypair() + + payload = { + "code": raw_code, + "webhook_url": public_url, + "instance_id": instance_id, + "public_key": public_key_pem, + } + encoded = base64.urlsafe_b64encode(json.dumps(payload).encode()).decode() + + now = datetime.now(UTC) + group = schemas.WorkspaceGroup( + name=label or "External connection", + invite_code=raw_code, + status="active", + created_at=now, + created_by_workspace_id=workspace_id, + ) + DbManager.create_record(group) + + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(member) + + return encoded, raw_code + + +def parse_federation_code(encoded: str) -> dict | None: + """Decode a federation connection payload. + + Returns ``{"code": ..., "webhook_url": ..., "instance_id": ..., + "public_key": ...}`` or *None* if the payload is invalid. 
+ """ + try: + decoded = base64.urlsafe_b64decode(encoded.encode()).decode() + payload = json.loads(decoded) + if all(k in payload for k in ("code", "webhook_url", "instance_id")): + return payload + except Exception as exc: + _logger.debug(f"decode_federation_code: invalid payload: {exc}") + return None + + +# --------------------------------------------------------------------------- +# Federated workspace management +# --------------------------------------------------------------------------- + + +def get_or_create_federated_workspace( + instance_id: str, + webhook_url: str, + public_key: str, + name: str | None = None, + *, + primary_team_id: str | None = None, + primary_workspace_name: str | None = None, +) -> schemas.FederatedWorkspace: + """Find or create a federated workspace record.""" + matches = DbManager.find_records( + schemas.FederatedWorkspace, + [schemas.FederatedWorkspace.instance_id == instance_id], + ) + existing = matches[0] if matches else None + if existing: + update_fields = { + schemas.FederatedWorkspace.webhook_url: webhook_url, + schemas.FederatedWorkspace.public_key: public_key, + schemas.FederatedWorkspace.status: "active", + schemas.FederatedWorkspace.updated_at: datetime.now(UTC), + } + if primary_team_id is not None: + update_fields[schemas.FederatedWorkspace.primary_team_id] = primary_team_id + if primary_workspace_name is not None: + update_fields[schemas.FederatedWorkspace.primary_workspace_name] = primary_workspace_name + DbManager.update_records( + schemas.FederatedWorkspace, + [schemas.FederatedWorkspace.id == existing.id], + update_fields, + ) + return DbManager.get_record(schemas.FederatedWorkspace, existing.id) + + fed_ws = schemas.FederatedWorkspace( + instance_id=instance_id, + webhook_url=webhook_url, + public_key=public_key, + status="active", + name=name, + primary_team_id=primary_team_id, + primary_workspace_name=primary_workspace_name, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + ) + 
DbManager.create_record(fed_ws) + return DbManager.get_record(schemas.FederatedWorkspace, fed_ws.id) + + +# --------------------------------------------------------------------------- +# HTTP client — push events to a federated workspace +# --------------------------------------------------------------------------- + +_REQUEST_TIMEOUT = 15 # seconds +_MAX_RETRIES = 3 +_RETRY_BACKOFF = [1, 2, 4] # seconds between retries + + +def _federation_request( + fed_ws: schemas.FederatedWorkspace, + path: str, + payload: dict, + method: str = "POST", +) -> dict | None: + """Send an authenticated request to a federated workspace. + + Signs the request with this instance's Ed25519 private key. + Retries up to :data:`_MAX_RETRIES` times on transient failures. + """ + url = fed_ws.webhook_url.rstrip("/") + path + body = json.dumps(payload) + + start_time = time.time() + + for attempt in range(_MAX_RETRIES): + try: + sig, ts = federation_sign(body) + headers = { + "Content-Type": "application/json", + "User-Agent": FEDERATION_USER_AGENT, + "X-Federation-Signature": sig, + "X-Federation-Timestamp": ts, + "X-Federation-Instance": get_instance_id(), + } + resp = requests.request(method, url, data=body, headers=headers, timeout=_REQUEST_TIMEOUT) + elapsed = round((time.time() - start_time) * 1000, 1) + + if resp.status_code == 200: + _logger.debug( + "federation_request_ok", + extra={"url": url, "elapsed_ms": elapsed, "attempts": attempt + 1}, + ) + try: + return resp.json() + except Exception as exc: + _logger.debug(f"federation_request: non-JSON success response: {exc}") + return {"ok": True} + elif resp.status_code >= 500: + _logger.warning( + "federation_request_retry", + extra={ + "url": url, + "status": resp.status_code, + "attempt": attempt + 1, + "remote": fed_ws.instance_id, + }, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + continue + elif resp.status_code == 401: + _logger.error( + "federation_auth_rejected", + extra={ + "url": url, + 
"remote": fed_ws.instance_id, + "message": "Keypair may have changed — reconnection required", + }, + ) + return None + else: + _logger.error( + "federation_request_failed", + extra={ + "url": url, + "status": resp.status_code, + "body": resp.text[:500], + "remote": fed_ws.instance_id, + }, + ) + return None + except requests.exceptions.Timeout: + _logger.warning( + "federation_request_timeout", + extra={"url": url, "attempt": attempt + 1, "remote": fed_ws.instance_id}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except requests.exceptions.ConnectionError as e: + _logger.warning( + "federation_connection_error", + extra={"url": url, "attempt": attempt + 1, "error": str(e), "remote": fed_ws.instance_id}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except Exception as e: + _logger.error( + "federation_request_error", + extra={"url": url, "error": str(e), "remote": fed_ws.instance_id}, + ) + return None + + elapsed = round((time.time() - start_time) * 1000, 1) + _logger.error( + "federation_request_exhausted", + extra={"url": url, "elapsed_ms": elapsed, "attempts": _MAX_RETRIES, "remote": fed_ws.instance_id}, + ) + return None + + +def push_message(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message (new post, thread reply) to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message", payload) + + +def push_edit(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message edit to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/edit", payload) + + +def push_delete(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message deletion to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/delete", payload) + + +def push_reaction(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward 
a reaction add/remove to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/react", payload) + + +def push_users(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Exchange user directory with a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/users", payload) + + +def initiate_federation_connect( + remote_url: str, + code: str, + *, + team_id: str | None = None, + workspace_name: str | None = None, +) -> dict | None: + """Call the remote instance's /api/federation/pair endpoint. + + Signs the request with this instance's Ed25519 private key so the + receiver can verify we control the keypair advertised in the connection code. + Optionally sends team_id and workspace_name so the remote (Instance A) can + tag the connection and soft-delete the matching local workspace. + """ + _, public_key_pem = get_or_create_instance_keypair() + + url = remote_url.rstrip("/") + "/api/federation/pair" + payload = { + "code": code, + "webhook_url": get_public_url(), + "instance_id": get_instance_id(), + "public_key": public_key_pem, + } + if team_id: + payload["team_id"] = team_id + if workspace_name: + payload["workspace_name"] = workspace_name + body = json.dumps(payload) + sig, ts = federation_sign(body) + + for attempt in range(_MAX_RETRIES): + try: + resp = requests.post( + url, + data=body, + headers={ + "Content-Type": "application/json", + "User-Agent": FEDERATION_USER_AGENT, + "X-Federation-Signature": sig, + "X-Federation-Timestamp": ts, + "X-Federation-Instance": get_instance_id(), + }, + timeout=_REQUEST_TIMEOUT, + ) + if resp.status_code == 200: + _logger.info("federation_pair_success", extra={"url": url}) + return resp.json() + elif resp.status_code >= 500: + _logger.warning( + "federation_pair_retry", + extra={"url": url, "status": resp.status_code, "attempt": attempt + 1}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + continue + else: + _logger.error( 
+ "federation_pair_failed", + extra={"url": url, "status": resp.status_code, "body": resp.text[:500]}, + ) + return None + except requests.exceptions.ConnectionError as e: + _logger.warning( + "federation_pair_connection_error", + extra={"url": url, "attempt": attempt + 1, "error": str(e)}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except requests.exceptions.Timeout: + _logger.warning( + "federation_pair_timeout", + extra={"url": url, "attempt": attempt + 1}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except Exception as e: + _logger.error("federation_pair_error", extra={"url": url, "error": str(e)}) + return None + + _logger.error("federation_pair_exhausted", extra={"url": url, "attempts": _MAX_RETRIES}) + return None + + +def ping_federated_workspace(fed_ws: schemas.FederatedWorkspace) -> bool: + """Check if a federated workspace is reachable.""" + url = fed_ws.webhook_url.rstrip("/") + "/api/federation/ping" + try: + resp = requests.get( + url, + headers={"User-Agent": FEDERATION_USER_AGENT}, + timeout=5, + ) + return resp.status_code == 200 + except Exception as exc: + _logger.debug(f"ping_federated_workspace: failed to reach {fed_ws.instance_id}: {exc}") + return False + + +# --------------------------------------------------------------------------- +# Payload builders +# --------------------------------------------------------------------------- + + +def build_message_payload( + *, + msg_type: str = "message", + sync_id: int, + post_id: str, + channel_id: str, + user_name: str, + user_avatar_url: str | None, + workspace_name: str, + text: str, + thread_post_id: str | None = None, + images: list[dict] | None = None, + timestamp: str | None = None, +) -> dict: + """Build a standardised federation message payload.""" + return { + "type": msg_type, + "sync_id": sync_id, + "post_id": post_id, + "channel_id": channel_id, + "user": { + "display_name": user_name, + "avatar_url": user_avatar_url, + 
"workspace_name": workspace_name, + }, + "text": text, + "thread_post_id": thread_post_id, + "images": images or [], + "timestamp": timestamp, + } + + +def build_edit_payload( + *, + post_id: str, + channel_id: str, + text: str, + timestamp: str, +) -> dict: + """Build a federation edit payload.""" + return { + "type": "edit", + "post_id": post_id, + "channel_id": channel_id, + "text": text, + "timestamp": timestamp, + } + + +def build_delete_payload( + *, + post_id: str, + channel_id: str, + timestamp: str, +) -> dict: + """Build a federation delete payload.""" + return { + "type": "delete", + "post_id": post_id, + "channel_id": channel_id, + "timestamp": timestamp, + } + + +def build_reaction_payload( + *, + post_id: str, + channel_id: str, + reaction: str, + action: str, + user_name: str, + timestamp: str, +) -> dict: + """Build a federation reaction payload.""" + return { + "type": "react", + "post_id": post_id, + "channel_id": channel_id, + "reaction": reaction, + "action": action, + "user_name": user_name, + "timestamp": timestamp, + } diff --git a/syncbot/handlers/__init__.py b/syncbot/handlers/__init__.py new file mode 100644 index 0000000..6a12f13 --- /dev/null +++ b/syncbot/handlers/__init__.py @@ -0,0 +1,135 @@ +"""Handlers package – Slack event, action, and view-submission handlers. + +Re-exports every public symbol so that ``import handlers`` / +``from handlers import X`` continues to work after the split. 
+""" + +from handlers._common import ( + EventContext, + _get_authorized_workspace, + _parse_private_metadata, + _sanitize_text, +) +from handlers.channel_sync import ( + handle_pause_sync, + handle_publish_channel, + handle_publish_channel_submit, + handle_publish_mode_submit, + handle_resume_sync, + handle_stop_sync, + handle_stop_sync_confirm, + handle_subscribe_channel, + handle_subscribe_channel_submit, + handle_unpublish_channel, +) +from handlers.export_import import ( + handle_backup_download, + handle_backup_restore, + handle_backup_restore_confirm_submit, + handle_backup_restore_submit, + handle_data_migration, + handle_data_migration_confirm_submit, + handle_data_migration_export, + handle_data_migration_submit, +) +from handlers.federation_cmds import ( + handle_enter_federation_code, + handle_federation_code_submit, + handle_federation_label_submit, + handle_generate_federation_code, + handle_remove_federation_connection, +) +from handlers.group_manage import ( + handle_leave_group, + handle_leave_group_confirm, +) +from handlers.groups import ( + handle_accept_group_invite, + handle_create_group, + handle_create_group_submit, + handle_decline_group_invite, + handle_invite_workspace, + handle_invite_workspace_submit, + handle_join_group, + handle_join_group_submit, +) +from handlers.messages import ( + _handle_reaction, + _is_own_bot_message, + _parse_event_fields, + respond_to_message_event, +) +from handlers.sync import ( + check_join_sync_channel, + handle_app_home_opened, + handle_join_sync_submission, + handle_member_joined_channel, + handle_new_sync_submission, + handle_refresh_home, + handle_remove_sync, +) +from handlers.tokens import handle_tokens_revoked +from handlers.users import ( + handle_team_join, + handle_user_mapping_back, + handle_user_mapping_edit_submit, + handle_user_mapping_refresh, + handle_user_profile_changed, +) + +__all__ = [ + "EventContext", + "_get_authorized_workspace", + "_handle_reaction", + "_is_own_bot_message", + 
"_parse_event_fields", + "_parse_private_metadata", + "_sanitize_text", + "check_join_sync_channel", + "handle_app_home_opened", + "handle_backup_download", + "handle_backup_restore", + "handle_backup_restore_confirm_submit", + "handle_backup_restore_submit", + "handle_data_migration", + "handle_data_migration_confirm_submit", + "handle_data_migration_export", + "handle_data_migration_submit", + "handle_accept_group_invite", + "handle_create_group", + "handle_create_group_submit", + "handle_decline_group_invite", + "handle_enter_federation_code", + "handle_federation_code_submit", + "handle_federation_label_submit", + "handle_generate_federation_code", + "handle_invite_workspace", + "handle_invite_workspace_submit", + "handle_join_group", + "handle_join_group_submit", + "handle_join_sync_submission", + "handle_leave_group", + "handle_leave_group_confirm", + "handle_member_joined_channel", + "handle_new_sync_submission", + "handle_pause_sync", + "handle_publish_channel", + "handle_publish_channel_submit", + "handle_publish_mode_submit", + "handle_refresh_home", + "handle_remove_federation_connection", + "handle_remove_sync", + "handle_resume_sync", + "handle_stop_sync", + "handle_stop_sync_confirm", + "handle_subscribe_channel", + "handle_subscribe_channel_submit", + "handle_team_join", + "handle_tokens_revoked", + "handle_unpublish_channel", + "handle_user_mapping_back", + "handle_user_mapping_edit_submit", + "handle_user_mapping_refresh", + "handle_user_profile_changed", + "respond_to_message_event", +] diff --git a/syncbot/handlers/_common.py b/syncbot/handlers/_common.py new file mode 100644 index 0000000..a40f423 --- /dev/null +++ b/syncbot/handlers/_common.py @@ -0,0 +1,71 @@ +"""Shared handler utilities and types.""" + +import logging +from typing import Any + +import helpers +from db import schemas + +_logger = logging.getLogger(__name__) + +try: + from typing import TypedDict +except ImportError: + from typing_extensions import TypedDict + + +class 
EventContext(TypedDict): + """Strongly-typed dict returned by ``_parse_event_fields``.""" + + team_id: str | None + channel_id: str | None + user_id: str | None + msg_text: str + mentioned_users: list[dict[str, Any]] + thread_ts: str | None + ts: str | None + event_subtype: str | None + + +def _parse_private_metadata(body: dict) -> dict: + """Extract and parse JSON ``private_metadata`` from a view submission.""" + import json as _json + + raw = helpers.safe_get(body, "view", "private_metadata") or "{}" + try: + return _json.loads(raw) + except Exception as exc: + _logger.debug(f"_parse_private_metadata: bad JSON: {exc}") + return {} + + +def _get_authorized_workspace( + body: dict, client, context: dict, action_name: str +) -> tuple[str, schemas.Workspace] | None: + """Validate authorization and return ``(user_id, workspace_record)``. + + Returns *None* and logs a warning if the user is not authorized or + the workspace cannot be resolved. + """ + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": action_name}) + return None + + team_id = ( + helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "team_id") + ) + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return None + + return user_id, workspace_record + + +def _sanitize_text(value: str, max_length: int = 100) -> str: + """Strip and truncate user-supplied text to prevent oversized DB writes.""" + if not value: + return value + return value.strip()[:max_length] diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py new file mode 100644 index 0000000..2e47d51 --- /dev/null +++ b/syncbot/handlers/channel_sync.py @@ -0,0 +1,847 @@ +"""Channel sync handlers — publish, unpublish, subscribe, pause, resume, stop.""" + +import logging 
from datetime import UTC, datetime
from logging import Logger

from slack_sdk.web import WebClient

import builders
import helpers
from builders._common import _format_channel_ref, _get_group_members
from db import DbManager, schemas
from handlers._common import _parse_private_metadata, _sanitize_text
from slack import actions, orm
from slack.blocks import context as block_context, section

_logger = logging.getLogger(__name__)

# Cap on how many channel options are loaded into the publish/subscribe
# modals — keeps the conversations.list paging bounded and the select usable.
_MAX_PUBLISH_CHANNEL_OPTIONS = 100


def _get_publishable_channel_options(client: WebClient, workspace_id: int) -> list[orm.SelectorOption]:
    """Return selector options for channels that are not already published/synced in this workspace.

    Pages through ``conversations.list`` (public + private, non-archived),
    skipping every channel that already has a live ``SyncChannel`` row for
    this workspace. Collection stops at ``_MAX_PUBLISH_CHANNEL_OPTIONS`` or
    when Slack's cursor is exhausted. On an API error, whatever was
    collected so far is returned (best-effort).
    """
    synced = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.workspace_id == workspace_id,
            schemas.SyncChannel.deleted_at.is_(None),
        ],
    )
    synced_ids = {c.channel_id for c in synced}

    options: list[orm.SelectorOption] = []
    cursor = ""
    try:
        while len(options) < _MAX_PUBLISH_CHANNEL_OPTIONS:
            resp = client.conversations_list(
                types="public_channel,private_channel",
                exclude_archived=True,
                limit=200,
                cursor=cursor or None,
            )
            chs = helpers.safe_get(resp, "channels") or []
            for ch in chs:
                cid = ch.get("id")
                if not cid or cid in synced_ids:
                    continue
                name = ch.get("name") or cid
                label = f"#{name}"
                # Slack select-option text is capped at 75 characters.
                if len(label) > 75:
                    label = label[:72] + "..."
                options.append(orm.SelectorOption(name=label, value=cid))
                if len(options) >= _MAX_PUBLISH_CHANNEL_OPTIONS:
                    break
            cursor = helpers.safe_get(resp, "response_metadata", "next_cursor") or ""
            if not cursor:
                break
    except Exception as e:
        # Best-effort: log and fall through with the partial option list.
        _logger.warning(f"_get_publishable_channel_options: {e}")

    return options


def _build_publish_step2(
    client: WebClient,
    group_id: int,
    sync_mode: str,
    other_members: list,
    workspace_id: int,
) -> orm.BlockView:
    """Build the step-2 modal blocks: channel picker (only unpublished channels) + optional target workspace.

    When ``sync_mode`` is ``"direct"`` and the group has other members, a
    second select is appended so the publisher can pick the single target
    workspace allowed to subscribe.
    """
    modal_blocks: list[orm.BaseBlock] = []

    channel_options = _get_publishable_channel_options(client, workspace_id)
    if not channel_options:
        # Placeholder option — "__none__" is rejected again on submit.
        channel_options = [
            orm.SelectorOption(name="— No channels available (all are already published or synced) —", value="__none__"),
        ]
    modal_blocks.append(
        orm.InputBlock(
            label="Channel to Publish",
            action=actions.CONFIG_PUBLISH_CHANNEL_SELECT,
            element=orm.StaticSelectElement(
                placeholder="Select a channel to publish",
                options=channel_options,
            ),
            optional=False,
        )
    )
    modal_blocks.append(
        block_context("Select a channel from your workspace to make available for syncing.")
    )

    if sync_mode == "direct" and other_members:
        ws_options: list[orm.SelectorOption] = []
        for m in other_members:
            ws = helpers.get_workspace_by_id(m.workspace_id)
            name = helpers.resolve_workspace_name(ws) if ws else f"Workspace {m.workspace_id}"
            ws_options.append(orm.SelectorOption(name=name, value=str(m.workspace_id)))

        if ws_options:
            modal_blocks.append(
                orm.InputBlock(
                    label="Target Workspace",
                    action=actions.CONFIG_PUBLISH_DIRECT_TARGET,
                    element=orm.StaticSelectElement(
                        placeholder="Select target workspace",
                        options=ws_options,
                    ),
                    optional=False,
                )
            )

    return orm.BlockView(blocks=modal_blocks)


def handle_publish_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Open the publish-channel flow — always starts with step 1 (sync mode selection).

    The button's ``value`` carries the group id; step 1 only asks for the
    sync mode ("group" vs "direct") and forwards the group id to step 2
    via ``parent_metadata``.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "publish_channel"})
        return

    trigger_id = helpers.safe_get(body, "trigger_id")
    raw_group_id = helpers.safe_get(body, "actions", 0, "value")
    try:
        group_id = int(raw_group_id)
    except (TypeError, ValueError):
        _logger.warning(f"publish_channel: invalid group_id: {raw_group_id!r}")
        return

    mode_options = [
        orm.SelectorOption(
            name="Available to entire group\nAny current or future member can subscribe",
            value="group",
        ),
        orm.SelectorOption(
            name="1-to-1 with a specific workspace\nOnly the selected workspace can subscribe",
            value="direct",
        ),
    ]
    step1_blocks: list[orm.BaseBlock] = [
        orm.InputBlock(
            label="Sync Mode",
            action=actions.CONFIG_PUBLISH_SYNC_MODE,
            element=orm.RadioButtonsElement(
                initial_value="group",
                options=orm.as_selector_options(
                    [o.name for o in mode_options],
                    [o.value for o in mode_options],
                ),
            ),
            optional=False,
        ),
    ]
    orm.BlockView(blocks=step1_blocks).post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_PUBLISH_MODE_SUBMIT,
        title_text="Publish Channel",
        submit_button_text="Next",
        parent_metadata={"group_id": group_id},
        new_or_add="new",
    )


def handle_publish_mode_submit(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Handle step 1 submission: read the selected sync mode and show step 2.

    Step 2 is pushed by answering the view submission with
    ``response_action="update"`` through the ``ack`` callable stashed in
    ``context`` — so the ack must not have been consumed earlier.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        return

    metadata = _parse_private_metadata(body)
    group_id = metadata.get("group_id")
    if not group_id:
        return

    # Scan all state values: the block id is generated, only the action id
    # is stable, hence the nested loop instead of a direct lookup.
    state_values = helpers.safe_get(body, "view", "state", "values") or {}
    sync_mode = "group"
    for _block_id, block_data in state_values.items():
        for action_id, action_data in block_data.items():
            if action_id == actions.CONFIG_PUBLISH_SYNC_MODE:
                selected = helpers.safe_get(action_data, "selected_option", "value")
                if selected:
                    sync_mode = selected

    team_id = helpers.safe_get(body, "view", "team_id")
    workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None

    # For direct mode the step-2 modal needs the other group members as
    # possible targets.
    other_members = []
    if workspace_record:
        group_members = _get_group_members(group_id)
        other_members = [
            m for m in group_members
            if m.workspace_id != workspace_record.id and m.workspace_id
        ]

    if not workspace_record:
        _logger.warning("handle_publish_mode_submit: no workspace_record")
        return
    step2 = _build_publish_step2(client, group_id, sync_mode, other_members, workspace_record.id)
    updated_view = step2.as_ack_update(
        callback_id=actions.CONFIG_PUBLISH_CHANNEL_SUBMIT,
        title_text="Publish Channel",
        submit_button_text="Publish",
        parent_metadata={"group_id": group_id, "sync_mode": sync_mode},
    )
    ack_fn = context.get("ack")
    if ack_fn:
        ack_fn(response_action="update", view=updated_view)
    else:
        _logger.warning("handle_publish_mode_submit: no ack function in context")
def handle_publish_channel_submit(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Create a Sync + SyncChannel for the publisher's channel, scoped to a group.

    Validation failures are surfaced as inline modal errors via
    ``ack(response_action="errors")``; on success the ack closes the modal
    before the slower Slack/DB work runs. A "direct" sync without a valid
    target silently falls back to "group" mode.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "publish_channel_submit"})
        return

    team_id = helpers.safe_get(body, "view", "team_id")
    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
    if not workspace_record:
        return

    metadata = _parse_private_metadata(body)
    group_id = metadata.get("group_id")

    if not group_id:
        _logger.warning("publish_channel_submit: missing group_id in metadata")
        return

    state_values = helpers.safe_get(body, "view", "state", "values") or {}

    sync_mode = metadata.get("sync_mode", "group")
    target_workspace_id = None

    # Block ids are generated; only action ids are stable, hence the scans.
    for _block_id, block_data in state_values.items():
        for action_id, action_data in block_data.items():
            if action_id == actions.CONFIG_PUBLISH_DIRECT_TARGET:
                selected_opt = helpers.safe_get(action_data, "selected_option", "value")
                if selected_opt:
                    try:
                        target_workspace_id = int(selected_opt)
                    except (TypeError, ValueError):
                        pass

    # Direct mode is meaningless without a target — degrade to group mode.
    if sync_mode == "direct" and not target_workspace_id:
        sync_mode = "group"

    ack_fn = context.get("ack")

    channel_id = None
    for _block_id, block_data in state_values.items():
        for action_id, action_data in block_data.items():
            if action_id == actions.CONFIG_PUBLISH_CHANNEL_SELECT:
                channel_id = action_data.get("selected_conversation") or action_data.get("selected_option", {}).get("value")

    # "__none__" is the placeholder option shown when no channel qualifies.
    if not channel_id or channel_id == "__none__":
        if ack_fn:
            ack_fn(
                response_action="errors",
                errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a channel to publish."},
            )
        return

    existing = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.channel_id == channel_id,
            schemas.SyncChannel.workspace_id == workspace_record.id,
            schemas.SyncChannel.deleted_at.is_(None),
        ],
    )
    if existing:
        if ack_fn:
            ack_fn(
                response_action="errors",
                errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This channel is already being synced."},
            )
        return

    # All validations passed — close the modal before the slow work below.
    if ack_fn:
        ack_fn()

    try:
        conv_info = client.conversations_info(channel=channel_id)
        channel_name = helpers.safe_get(conv_info, "channel", "name") or channel_id
    except Exception as exc:
        # Fall back to the raw id as the title if the lookup fails.
        _logger.debug(f"handle_publish_channel_submit: conversations_info failed for {channel_id}: {exc}")
        channel_name = channel_id

    try:
        # The bot must be in the channel to relay messages.
        client.conversations_join(channel=channel_id)

        sync_record = schemas.Sync(
            title=_sanitize_text(channel_name),
            description=None,
            group_id=group_id,
            sync_mode=sync_mode,
            target_workspace_id=target_workspace_id if sync_mode == "direct" else None,
            publisher_workspace_id=workspace_record.id,
        )
        DbManager.create_record(sync_record)

        sync_channel_record = schemas.SyncChannel(
            sync_id=sync_record.id,
            channel_id=channel_id,
            workspace_id=workspace_record.id,
            created_at=datetime.now(UTC),
        )
        DbManager.create_record(sync_channel_record)

        _logger.info(
            "channel_published",
            extra={
                "workspace_id": workspace_record.id,
                "channel_id": channel_id,
                "group_id": group_id,
                "sync_id": sync_record.id,
                "sync_mode": sync_mode,
            },
        )
    except Exception as e:
        _logger.error(f"Failed to publish channel {channel_id}: {e}")

    # Refresh Home for all admins in current workspace, then partner workspaces
    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context)


def handle_unpublish_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Unpublish a channel: hard-delete the Sync record.

    DB cascades remove all ``SyncChannel`` and ``PostMeta`` rows.
    Only the original publisher can unpublish. Every participating channel
    is notified (and left) before the delete, so channel names can still
    be resolved from the soon-to-be-removed rows.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "unpublish_channel"})
        return

    team_id = (
        helpers.safe_get(body, "view", "team_id")
        or helpers.safe_get(body, "team", "id")
        or helpers.safe_get(body, "user", "team_id")
    )
    workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None
    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    raw_value = helpers.safe_get(body, "actions", 0, "value")
    try:
        sync_id = int(raw_value)
    except (TypeError, ValueError):
        _logger.warning(f"Invalid sync_id for unpublish: {raw_value!r}")
        return

    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if not sync_record:
        return

    # Authorization beyond admin: only the publishing workspace may unpublish.
    if workspace_record and sync_record.publisher_workspace_id != workspace_record.id:
        _logger.warning("unpublish_denied: not the publisher")
        return

    group_id = sync_record.group_id

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    for ch in all_channels:
        try:
            ws = helpers.get_workspace_by_id(ch.workspace_id)
            if ws and ws.bot_token:
                # Local channel sees the admin's display name; partner
                # workspaces see the anonymised label.
                name = admin_name if workspace_record and ch.workspace_id == workspace_record.id else admin_label
                ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token))
                helpers.notify_synced_channels(
                    ws_client,
                    [ch.channel_id],
                    f":octagonal_sign: *{name}* unpublished this channel. Syncing is no longer available.",
                )
                ws_client.conversations_leave(channel=ch.channel_id)
        except Exception as e:
            _logger.warning(f"Failed to notify/leave channel {ch.channel_id}: {e}")

    # Hard delete — cascades drop SyncChannel and PostMeta rows.
    DbManager.delete_records(schemas.Sync, [schemas.Sync.id == sync_id])

    _logger.info(
        "channel_unpublished",
        extra={"sync_id": sync_id, "group_id": group_id},
    )

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    if group_id:
        _refresh_group_member_homes(group_id, workspace_record.id if workspace_record else 0, logger, context=context)


def _toggle_sync_status(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
    *,
    action_prefix: str,
    target_status: str,
    emoji: str,
    verb: str,
    log_event: str,
) -> None:
    """Shared logic for pausing or resuming a channel sync.

    The sync id is embedded in the action id as ``<prefix>_<id>``. All
    participating SyncChannel rows are flipped to ``target_status``
    ("paused"/"active" — presumably checked by the message relay; confirm)
    and every channel is notified.
    """
    action_id = helpers.safe_get(body, "actions", 0, "action_id") or ""
    sync_id_str = action_id.replace(action_prefix + "_", "")

    try:
        sync_id = int(sync_id_str)
    except (TypeError, ValueError):
        _logger.warning(f"{log_event}_invalid_id", extra={"action_id": action_id})
        return

    user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body)
    team_id = (
        helpers.safe_get(body, "view", "team_id")
        or helpers.safe_get(body, "team", "id")
        or helpers.safe_get(body, "user", "team_id")
    )
    workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None
    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    for ch in all_channels:
        DbManager.update_records(
            schemas.SyncChannel,
            [schemas.SyncChannel.id == ch.id],
            {schemas.SyncChannel.status: target_status},
        )

    # Per-call workspace cache. NOTE(review): a workspace that resolves to
    # None is re-fetched on every iteration (the ``or`` treats the cached
    # None as a miss) — harmless, just a repeated lookup.
    ws_cache: dict[int, schemas.Workspace | None] = {}
    for ch in all_channels:
        try:
            ws = ws_cache.get(ch.workspace_id) or helpers.get_workspace_by_id(ch.workspace_id)
            ws_cache[ch.workspace_id] = ws
            if ws and ws.bot_token:
                name = admin_name if workspace_record and ch.workspace_id == workspace_record.id else admin_label
                partner_chs = [c for c in all_channels if c.workspace_id != ch.workspace_id]
                if partner_chs:
                    p_ws = ws_cache.get(partner_chs[0].workspace_id) or helpers.get_workspace_by_id(partner_chs[0].workspace_id)
                    ws_cache[partner_chs[0].workspace_id] = p_ws
                    partner_ref = helpers.resolve_channel_name(partner_chs[0].channel_id, p_ws)
                    msg = f":{emoji}: *{name}* {verb} syncing with *{partner_ref}*."
                else:
                    msg = f":{emoji}: *{name}* {verb} channel syncing."
                ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token))
                helpers.notify_synced_channels(ws_client, [ch.channel_id], msg)
        except Exception as e:
            _logger.warning(f"Failed to notify channel {ch.channel_id} about {verb}: {e}")

    _logger.info(log_event, extra={"sync_id": sync_id, "channels": len(all_channels)})

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if sync_record and sync_record.group_id:
        _refresh_group_member_homes(sync_record.group_id, workspace_record.id if workspace_record else 0, logger, context=context)


def handle_pause_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None:
    """Pause an active channel sync."""
    _toggle_sync_status(
        body, client, logger, context,
        action_prefix=actions.CONFIG_PAUSE_SYNC,
        target_status="paused",
        emoji="double_vertical_bar",
        verb="paused",
        log_event="sync_paused",
    )
def handle_resume_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None:
    """Resume a paused channel sync."""
    _toggle_sync_status(
        body, client, logger, context,
        action_prefix=actions.CONFIG_RESUME_SYNC,
        target_status="active",
        emoji="arrow_forward",
        verb="resumed",
        log_event="sync_resumed",
    )


def handle_stop_sync(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Show a confirmation modal before stopping a channel sync.

    The sync id rides in the action id as ``<CONFIG_STOP_SYNC>_<id>`` and
    is forwarded to the confirm step via ``parent_metadata``.
    """
    action_id = helpers.safe_get(body, "actions", 0, "action_id") or ""
    sync_id_str = action_id.replace(actions.CONFIG_STOP_SYNC + "_", "")

    try:
        sync_id = int(sync_id_str)
    except (TypeError, ValueError):
        _logger.warning("stop_sync_invalid_id", extra={"action_id": action_id})
        return

    trigger_id = helpers.safe_get(body, "trigger_id")
    if not trigger_id:
        return

    confirm_form = orm.BlockView(
        blocks=[
            section(
                ":warning: *Are you sure you want to stop syncing this channel?*\n\n"
                "This will:\n"
                "\u2022 Remove your workspace's sync history for this channel\n"
                "\u2022 Remove this channel from the active sync\n"
                "\u2022 Other workspaces in the sync will continue uninterrupted\n\n"
                "_No messages will be deleted from any channel — only SyncBot's tracking history for your workspace is removed._"
            ),
        ]
    )

    confirm_form.post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_STOP_SYNC_CONFIRM,
        title_text="Stop Syncing",
        submit_button_text="Stop Syncing",
        close_button_text="Cancel",
        parent_metadata={"sync_id": sync_id},
    )


def handle_stop_sync_confirm(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Execute channel sync stop after confirmation.

    Removes only this workspace's ``SyncChannel`` and its ``PostMeta``.
    Other workspaces' data and the Sync record remain intact. Channels are
    notified *before* the rows are deleted so names can still be resolved.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "stop_sync_confirm"})
        return

    meta = _parse_private_metadata(body)
    sync_id = meta.get("sync_id")
    if not sync_id:
        _logger.warning("stop_sync_confirm: missing sync_id in metadata")
        return

    team_id = helpers.safe_get(body, "view", "team_id")
    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
    if not workspace_record:
        return

    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    my_channel = next((c for c in all_channels if c.workspace_id == workspace_record.id), None)
    other_channels = [c for c in all_channels if c.workspace_id != workspace_record.id]

    for ch in all_channels:
        try:
            ws = helpers.get_workspace_by_id(ch.workspace_id)
            if ws and ws.bot_token:
                # Message varies by audience: our own channel names the
                # partner; partner channels get the anonymised label.
                if ch.workspace_id == workspace_record.id and other_channels:
                    p_ws = helpers.get_workspace_by_id(other_channels[0].workspace_id)
                    partner_ref = helpers.resolve_channel_name(other_channels[0].channel_id, p_ws)
                    msg = f":octagonal_sign: *{admin_name}* stopped syncing with *{partner_ref}*."
                elif ch.workspace_id != workspace_record.id:
                    my_ref = (
                        helpers.resolve_channel_name(my_channel.channel_id, workspace_record)
                        if my_channel
                        else "the other workspace"
                    )
                    msg = f":octagonal_sign: *{admin_label}* stopped syncing with *{my_ref}*."
                else:
                    msg = f":octagonal_sign: *{admin_name}* stopped channel syncing."
                ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token))
                helpers.notify_synced_channels(ws_client, [ch.channel_id], msg)
        except Exception as e:
            _logger.warning(f"Failed to notify channel {ch.channel_id}: {e}")

    if my_channel:
        # Remove only this workspace's tracking rows, then leave the channel.
        DbManager.delete_records(schemas.PostMeta, [schemas.PostMeta.sync_channel_id == my_channel.id])
        DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == my_channel.id])
        try:
            client.conversations_leave(channel=my_channel.channel_id)
        except Exception as e:
            _logger.warning(f"Failed to leave channel {my_channel.channel_id}: {e}")

    _logger.info(
        "sync_stopped",
        extra={
            "sync_id": sync_id,
            "workspace_id": workspace_record.id,
            "channel_id": my_channel.channel_id if my_channel else None,
        },
    )

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if sync_record and sync_record.group_id:
        _refresh_group_member_homes(sync_record.group_id, workspace_record.id, logger, context=context)


def handle_subscribe_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Push the channel picker modal for subscribing to an available channel.

    The channel list only shows channels that are not already in any sync
    (excluding already-synced and published-but-unsubscribed channels).

    FIX: the action ``value`` (sync id) was previously passed straight to
    ``int()``, so a malformed value raised an uncaught ValueError; it is now
    parsed once with the same guard every sibling handler uses.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "subscribe_channel"})
        return

    trigger_id = helpers.safe_get(body, "trigger_id")

    # Parse the sync id once, defensively — consistent with
    # handle_publish_channel / handle_unpublish_channel. A missing value is
    # tolerated (the preview section is simply skipped); a malformed one is
    # logged and aborts instead of raising into the dispatcher.
    raw_sync_id = helpers.safe_get(body, "actions", 0, "value")
    sync_id = None
    if raw_sync_id:
        try:
            sync_id = int(raw_sync_id)
        except (TypeError, ValueError):
            _logger.warning(f"subscribe_channel: invalid sync_id: {raw_sync_id!r}")
            return

    team_id = (
        helpers.safe_get(body, "view", "team_id")
        or helpers.safe_get(body, "team", "id")
        or helpers.safe_get(body, "user", "team_id")
    )
    workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None
    if not workspace_record:
        _logger.warning("handle_subscribe_channel: no workspace_record")
        return

    blocks: list[orm.BaseBlock] = []

    if sync_id:
        # Show which published channel the admin is about to subscribe to.
        publisher_channels = DbManager.find_records(
            schemas.SyncChannel,
            [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
        )
        if publisher_channels:
            pub_ch = publisher_channels[0]
            pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
            ch_ref = _format_channel_ref(pub_ch.channel_id, pub_ws, is_local=False)
            blocks.append(section(f"Subscribing to: {ch_ref}"))

    channel_options = _get_publishable_channel_options(client, workspace_record.id)
    if not channel_options:
        # Placeholder option — "__none__" is rejected again on submit.
        channel_options = [
            orm.SelectorOption(
                name="— No channels available (all are already in a sync) —",
                value="__none__",
            ),
        ]
    blocks.append(
        orm.InputBlock(
            label="Channel for Sync",
            action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT,
            element=orm.StaticSelectElement(
                placeholder="Select a channel to sync into",
                options=channel_options,
            ),
            optional=False,
        )
    )
    blocks.append(
        block_context("Choose a channel in your workspace to receive synced messages.")
    )

    orm.BlockView(blocks=blocks).post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT,
        title_text="Subscribe to Channel",
        submit_button_text="Subscribe",
        parent_metadata={"sync_id": sync_id} if sync_id else None,
        new_or_add="new",
    )


def handle_subscribe_channel_submit(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Subscribe to an available channel sync: create SyncChannel for subscriber.

    Joins the selected channel, records the subscription, then announces
    the new link in both the subscriber's channel and every publisher-side
    channel (notification failures are non-fatal).
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "subscribe_channel_submit"})
        return

    team_id = helpers.safe_get(body, "view", "team_id")
    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
    if not workspace_record:
        return

    metadata = _parse_private_metadata(body)
    sync_id = metadata.get("sync_id")

    if not sync_id:
        _logger.warning("subscribe_channel_submit: missing sync_id")
        return

    # Block ids are generated; only action ids are stable, hence the scan.
    state_values = helpers.safe_get(body, "view", "state", "values") or {}
    channel_id = None
    for _block_id, block_data in state_values.items():
        for action_id, action_data in block_data.items():
            if action_id == actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT:
                channel_id = (
                    action_data.get("selected_conversation")
                    or helpers.safe_get(action_data, "selected_option", "value")
                )

    if not channel_id or channel_id == "__none__":
        _logger.warning("subscribe_channel_submit: no channel selected")
        return

    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if not sync_record:
        return

    group_id = sync_record.group_id
    acting_user_id = helpers.safe_get(body, "user", "id") or user_id
    admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record)

    publisher_channels: list = []
    try:
        # The bot must be in the channel to relay messages.
        client.conversations_join(channel=channel_id)

        sync_channel_record = schemas.SyncChannel(
            sync_id=sync_id,
            channel_id=channel_id,
            workspace_id=workspace_record.id,
            created_at=datetime.now(UTC),
        )
        DbManager.create_record(sync_channel_record)

        publisher_channels = DbManager.find_records(
            schemas.SyncChannel,
            [
                schemas.SyncChannel.sync_id == sync_id,
                schemas.SyncChannel.deleted_at.is_(None),
                schemas.SyncChannel.workspace_id != workspace_record.id,
            ],
        )

        # Announce in the subscriber's own channel.
        try:
            if publisher_channels:
                pub_ch = publisher_channels[0]
                pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
                partner_ref = helpers.resolve_channel_name(pub_ch.channel_id, pub_ws)
            else:
                partner_ref = sync_record.title or "the partner channel"
            client.chat_postMessage(
                channel=channel_id,
                text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{partner_ref}*. Messages will be shared automatically.",
            )
        except Exception as exc:
            _logger.debug(f"subscribe_channel: failed to notify subscriber channel {channel_id}: {exc}")

        # Announce in each publisher-side channel, using the anonymised label.
        local_ref = helpers.resolve_channel_name(channel_id, workspace_record)
        for pub_ch in publisher_channels:
            try:
                pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
                if pub_ws:
                    pub_client = WebClient(token=helpers.decrypt_bot_token(pub_ws.bot_token))
                    pub_client.chat_postMessage(
                        channel=pub_ch.channel_id,
                        text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this channel. Messages will be shared automatically.",
                    )
            except Exception as exc:
                _logger.debug(f"subscribe_channel: failed to notify publisher channel {pub_ch.channel_id}: {exc}")

        _logger.info(
            "channel_subscribed",
            extra={
                "workspace_id": workspace_record.id,
                "channel_id": channel_id,
                "sync_id": sync_id,
                "group_id": group_id,
            },
        )
    except Exception as e:
        _logger.error(f"Failed to subscribe to channel sync {sync_id}: {e}")

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    if group_id:
        _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context)
+ """ + members = _get_group_members(group_id) + refreshed: set[int] = set() + for m in members: + if not m.workspace_id or m.workspace_id == exclude_workspace_id or m.workspace_id in refreshed: + continue + ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + if ws: + builders.refresh_home_tab_for_workspace(ws, logger, context=None) + refreshed.add(m.workspace_id) diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py new file mode 100644 index 0000000..6998a33 --- /dev/null +++ b/syncbot/handlers/export_import.py @@ -0,0 +1,508 @@ +"""Backup/Restore and Data Migration handlers (modals and submissions).""" + +import json +import logging +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import constants +import helpers +from db import DbManager, schemas +from helpers import export_import as ei +from slack import actions + +_logger = logging.getLogger(__name__) + + +def _is_admin(client: WebClient, user_id: str, body: dict) -> bool: + return helpers.is_user_authorized(client, user_id) + + +# --------------------------------------------------------------------------- +# Backup/Restore +# --------------------------------------------------------------------------- + +def handle_backup_restore( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open Backup/Restore modal (admin only).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + from slack import orm + + blocks = [ + orm.SectionBlock(label="*Download backup*\nGenerate a full-instance backup (JSON) and receive it in your DM."), + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":floppy_disk: Download backup", + action=actions.CONFIG_BACKUP_DOWNLOAD, + ), + ], + ), + orm.DividerBlock(), + 
orm.SectionBlock( + label="*Restore from backup*\nPaste the backup JSON below. You will be asked to confirm if the encryption key or integrity check does not match.", + ), + orm.InputBlock( + label="Backup JSON", + action=actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, + element=orm.PlainTextInputElement( + placeholder='Paste backup JSON here (e.g. {"version": 1, ...})', + multiline=True, + max_length=3000, + ), + ), + ] + + view = orm.BlockView(blocks=blocks) + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_BACKUP_RESTORE_SUBMIT, + "title": {"type": "plain_text", "text": "Backup / Restore"}, + "submit": {"type": "plain_text", "text": "Restore"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": view.as_form_field(), + }, + ) + + +def handle_backup_download( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Generate backup and send to user's DM (called from modal button).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + try: + payload = ei.build_full_backup() + json_str = json.dumps(payload, default=ei._json_serializer, indent=2) + client.files_upload( + content=json_str, + filename=f"syncbot-backup-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", + channels=user_id, + initial_comment="Your SyncBot full-instance backup. 
Keep this file secure.", + ) + except Exception as e: + _logger.exception("backup_download failed: %s", e) + return + # Optionally update the modal to say "Backup sent to your DM" + response_url = helpers.safe_get(body, "response_url") + if response_url: + try: + from slack_sdk.webhook import WebhookClient + w = WebhookClient(response_url) + w.send(text=":white_check_mark: Backup sent to your DM.") + except Exception: + pass + + +def handle_backup_restore_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> dict | None: + """Process restore submission. Returns response dict with errors or None to close.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return None + + values = helpers.safe_get(body, "view", "state", "values") or {} + json_text = "" + for _block_id, block_data in values.items(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: + json_text = (action_data.get("value") or "").strip() + + if not json_text: + return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Paste backup JSON to restore."}} + + try: + data = json.loads(json_text) + except json.JSONDecodeError as e: + return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Invalid JSON: {e}"}} + + if data.get("version") != ei.BACKUP_VERSION: + return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Unsupported backup version (expected {ei.BACKUP_VERSION})."}} + + hmac_ok = ei.verify_backup_hmac(data) + key_ok = ei.verify_backup_encryption_key(data) + + # If warnings needed, store payload in cache and show confirmation modal + if not hmac_ok or not key_ok: + from helpers._cache import _cache_set + cache_key = f"restore_pending:{user_id}" + _cache_set(cache_key, data, ttl=600) + return { + "response_action": "push", + 
"view": { + "type": "modal", + "callback_id": actions.CONFIG_BACKUP_RESTORE_CONFIRM, + "title": {"type": "plain_text", "text": "Confirm restore"}, + "submit": {"type": "plain_text", "text": "Proceed anyway"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "private_metadata": user_id, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ( + ("*Integrity check failed.* The file may have been modified or could be malicious. Only proceed if you intentionally edited the file.\n\n" if not hmac_ok else "") + + ("*Encryption key mismatch.* Restored bot tokens will not be usable; workspaces must reinstall the app to re-authorize.\n\n" if not key_ok else "") + + "Do you want to proceed with restore anyway?" + ), + }, + }, + ], + }, + } + + _do_restore(data, client, user_id) + return None + + +def handle_backup_restore_confirm_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> dict | None: + """Second-step restore when user confirmed warnings.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return None + private_metadata = (helpers.safe_get(body, "view", "private_metadata") or "").strip() + if not private_metadata: + return {"response_action": "errors", "errors": {"": "Missing state."}} + from helpers._cache import _cache_get + data = _cache_get(f"restore_pending:{private_metadata}") + if not data: + return {"response_action": "errors", "errors": {"": "Restore data expired. 
Please paste the backup JSON again and submit."}}
+    _do_restore(data, client, user_id)
+    return None
+
+
+def _do_restore(data: dict, client: WebClient, user_id: str) -> None:
+    """Run restore and invalidate caches."""
+    try:
+        team_ids = ei.restore_full_backup(data, skip_hmac_check=True, skip_encryption_key_check=True)
+        ei.invalidate_home_tab_caches_for_all_teams(team_ids)
+    except Exception as e:
+        _logger.exception("restore failed: %s", e)
+        raise
+    # No team_id is available from the WebClient here; the Home-tab caches were
+    # invalidated above, so each workspace renders fresh data the next time a
+    # user opens the Home tab — no explicit per-workspace refresh is required.
+
+
+# ---------------------------------------------------------------------------
+# Data Migration
+# ---------------------------------------------------------------------------
+
+def handle_data_migration(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Open Data Migration modal (admin only, federation enabled)."""
+    if not constants.FEDERATION_ENABLED:
+        return
+    user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body)
+    if not _is_admin(client, user_id, body):
+        return
+    trigger_id = helpers.safe_get(body, "trigger_id")
+    if not trigger_id:
+        return
+
+    from slack import orm
+
+    blocks = [
+        orm.SectionBlock(
+            label="*Export*\nDownload your workspace data for migration to another instance. You will receive a JSON file in your DM.",
+        ),
+        orm.ActionsBlock(
+            elements=[
+                orm.ButtonElement(
+                    label=":outbox_tray: Export my workspace data",
+                    action=actions.CONFIG_DATA_MIGRATION_EXPORT,
+                ),
+            ],
+        ),
+        orm.DividerBlock(),
+        orm.SectionBlock(
+            label="*Import*\nPaste a migration file JSON below. Existing sync channels in the federated group will be replaced.",
+        ),
+        orm.InputBlock(
+            label="Migration JSON",
+            action=actions.CONFIG_DATA_MIGRATION_JSON_INPUT,
+            element=orm.PlainTextInputElement(
+                placeholder='Paste migration JSON here (e.g. 
{"version": 1, "workspace": {...}, ...})', + multiline=True, + max_length=3000, + ), + ), + ] + + view = orm.BlockView(blocks=blocks) + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_DATA_MIGRATION_SUBMIT, + "title": {"type": "plain_text", "text": "Data Migration"}, + "submit": {"type": "plain_text", "text": "Import"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": view.as_form_field(), + }, + ) + + +def handle_data_migration_export( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Export workspace migration JSON and send to user's DM.""" + if not constants.FEDERATION_ENABLED: + return + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "team_id") + if not _is_admin(client, user_id, body): + return + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + try: + payload = ei.build_migration_export(workspace_record.id, include_source_instance=True) + json_str = json.dumps(payload, default=ei._json_serializer, indent=2) + client.files_upload( + content=json_str, + filename=f"syncbot-migration-{workspace_record.team_id}-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", + channels=user_id, + initial_comment="Your SyncBot workspace migration file. 
Use it on the new instance after connecting via federation.", + ) + except Exception as e: + _logger.exception("data_migration_export failed: %s", e) + + +def handle_data_migration_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> dict | None: + """Process migration import submission.""" + if not constants.FEDERATION_ENABLED: + return None + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") + if not _is_admin(client, user_id, body): + return None + + values = helpers.safe_get(body, "view", "state", "values") or {} + json_text = "" + for _block_id, block_data in values.items(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_DATA_MIGRATION_JSON_INPUT: + json_text = (action_data.get("value") or "").strip() + + if not json_text: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Paste migration JSON to import."}} + + try: + data = json.loads(json_text) + except json.JSONDecodeError as e: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON: {e}"}} + + if data.get("version") != ei.MIGRATION_VERSION: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})."}} + + workspace_payload = data.get("workspace", {}) + export_team_id = workspace_payload.get("team_id") + if not export_team_id: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}} + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record or workspace_record.team_id != export_team_id: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration 
file is for a different workspace. Open the app from the workspace that matches the migration file."}} + + # Build team_id -> workspace_id on B + team_id_to_workspace_id = {workspace_record.team_id: workspace_record.id} + workspaces_b = DbManager.find_records(schemas.Workspace, [schemas.Workspace.deleted_at.is_(None)]) + for w in workspaces_b: + if w.team_id: + team_id_to_workspace_id[w.team_id] = w.id + + # Optional: establish connection if source_instance present + source = data.get("source_instance") + if source and source.get("connection_code"): + import secrets + from federation import core as federation + result = federation.initiate_federation_connect( + source["webhook_url"], + source["connection_code"], + team_id=workspace_record.team_id, + workspace_name=workspace_record.workspace_name or None, + ) + if result and result.get("ok"): + fed_ws = federation.get_or_create_federated_workspace( + instance_id=source["instance_id"], + webhook_url=source["webhook_url"], + public_key=source["public_key"], + name=f"Connection {source['instance_id'][:8]}", + ) + my_groups = helpers.get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + found = False + for fm in fed_members: + if fm.group_id in my_group_ids: + found = True + break + if not found: + now = datetime.now(UTC) + new_group = schemas.WorkspaceGroup( + name=f"Federation — {fed_ws.name}", + invite_code=f"FED-{secrets.token_hex(4).upper()}", + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(new_group) + DbManager.create_record(schemas.WorkspaceGroupMember( + group_id=new_group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + 
joined_at=now, + )) + DbManager.create_record(schemas.WorkspaceGroupMember( + group_id=new_group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + )) + + # Resolve federated group (W + connection to source instance) + my_groups = helpers.get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id.isnot(None), + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + candidate_groups = [fm.group_id for fm in fed_members if fm.group_id in my_group_ids] + group_id = candidate_groups[0] if candidate_groups else None + if not group_id: + return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import."}} + + sig_ok = ei.verify_migration_signature(data) + if not sig_ok and source: + # Store in cache and show confirmation modal (private_metadata size limit) + from helpers._cache import _cache_set + cache_key = f"migration_import_pending:{user_id}" + _cache_set(cache_key, { + "data": data, + "group_id": group_id, + "workspace_id": workspace_record.id, + "team_id_to_workspace_id": team_id_to_workspace_id, + }, ttl=600) + return { + "response_action": "push", + "view": { + "type": "modal", + "callback_id": actions.CONFIG_DATA_MIGRATION_CONFIRM, + "title": {"type": "plain_text", "text": "Confirm import"}, + "submit": {"type": "plain_text", "text": "Proceed anyway"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "private_metadata": user_id, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Integrity check failed.* The file may have been modified or could be malicious. 
Only proceed if you intentionally edited the file.\n\nProceed with import anyway?", + }, + }, + ], + }, + } + + ei.import_migration_data( + data, + workspace_record.id, + group_id, + team_id_to_workspace_id=team_id_to_workspace_id, + ) + ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) + return None + + +def handle_data_migration_confirm_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> dict | None: + """Second-step import when user confirmed tampering warning.""" + if not constants.FEDERATION_ENABLED: + return None + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return None + private_metadata = (helpers.safe_get(body, "view", "private_metadata") or "").strip() + if not private_metadata: + return {"response_action": "errors", "errors": {"": "Missing state."}} + from helpers._cache import _cache_get + meta = _cache_get(f"migration_import_pending:{private_metadata}") + if not meta: + return {"response_action": "errors", "errors": {"": "Import data expired. 
Please paste the migration JSON again and submit."}} + data = meta.get("data") + group_id = meta.get("group_id") + workspace_id = meta.get("workspace_id") + team_id_to_workspace_id = meta.get("team_id_to_workspace_id", {}) + if not data or not group_id or not workspace_id: + return {"response_action": "errors", "errors": {"": "Missing import data."}} + + workspace_record = DbManager.get_record(schemas.Workspace, workspace_id) + if not workspace_record: + return {"response_action": "errors", "errors": {"": "Workspace not found."}} + + ei.import_migration_data( + data, + workspace_record.id, + group_id, + team_id_to_workspace_id=team_id_to_workspace_id, + ) + ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) + return None diff --git a/syncbot/handlers/federation_cmds.py b/syncbot/handlers/federation_cmds.py new file mode 100644 index 0000000..e0dd65d --- /dev/null +++ b/syncbot/handlers/federation_cmds.py @@ -0,0 +1,363 @@ +"""Federation command handlers — code generation, entry, and connection via Slack UI.""" + +import logging +import secrets +from datetime import UTC, datetime, timedelta +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import federation +import helpers +from db import DbManager, schemas +from slack import actions, orm + +_logger = logging.getLogger(__name__) + + +def _exchange_user_directory( + fed_ws: schemas.FederatedWorkspace, + workspace_record: schemas.Workspace, +) -> None: + """Push our local user directory to a federated workspace and store theirs.""" + local_users = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_record.id], + ) + users_payload = [ + { + "user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + } + for u in local_users + ] + + result = federation.push_users( + fed_ws, + { + "users": users_payload, + "workspace_id": workspace_record.id, + }, + ) + + 
if result and result.get("users"): + remote_users = result["users"] + now = datetime.now(UTC) + for u in remote_users: + remote_ws_id = u.get("workspace_id") + if not remote_ws_id: + continue + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == remote_ws_id, + schemas.UserDirectory.slack_user_id == u.get("user_id", ""), + ], + ) + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: u.get("email"), + schemas.UserDirectory.real_name: u.get("real_name"), + schemas.UserDirectory.display_name: u.get("display_name"), + schemas.UserDirectory.updated_at: now, + }, + ) + else: + record = schemas.UserDirectory( + workspace_id=remote_ws_id, + slack_user_id=u.get("user_id", ""), + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + updated_at=now, + ) + DbManager.create_record(record) + + _logger.info( + "federation_user_exchange_complete", + extra={"remote": fed_ws.instance_id, "sent": len(users_payload), "received": len(remote_users)}, + ) + + +def handle_generate_federation_code( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal asking for a label before generating the connection code.""" + if not constants.FEDERATION_ENABLED: + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + blocks = [ + orm.InputBlock( + label="Name for this connection", + action=actions.CONFIG_FEDERATION_LABEL_INPUT, + element=orm.PlainTextInputElement( + placeholder="e.g. 
East Coast SyncBot, Partner Org...", + ), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Give this connection a friendly name so you can identify it later.", + ), + ), + ] + + view = orm.BlockView(blocks=blocks) + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_FEDERATION_LABEL_SUBMIT, + "title": {"type": "plain_text", "text": "New Connection"}, + "submit": {"type": "plain_text", "text": "Generate Code"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": view.as_form_field(), + }, + ) + + +def handle_federation_label_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Generate the connection code after the admin provides a label.""" + if not constants.FEDERATION_ENABLED: + return + + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + public_url = federation.get_public_url() + if not public_url: + _logger.warning("federation_no_public_url") + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + label = "" + for block_data in values.values(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_FEDERATION_LABEL_INPUT: + label = (action_data.get("value") or "").strip() + + encoded, raw_code = federation.generate_federation_code(workspace_record.id, label=label or None) + + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if user_id: + try: + dm = client.conversations_open(users=[user_id]) + dm_channel = helpers.safe_get(dm, "channel", "id") + if dm_channel: + expires_ts = int((datetime.now(UTC) + timedelta(hours=24)).timestamp()) + client.chat_postMessage( + channel=dm_channel, + text=":globe_with_meridians: *Connection Code Generated*" + + (f" — _{label}_" if 
label else "")
+                        + f"\n\nShare this code with the admin of the other SyncBot instance:\n\n```{encoded}```"
+                        + f"\nThis code expires <!date^{expires_ts}^{{date_short_pretty}} at {{time}}|in 24 hours>.",
+                    )
+        except Exception as e:
+            _logger.warning(f"Failed to DM connection code: {e}")
+
+    _logger.info(
+        "federation_code_generated",
+        extra={"workspace_id": workspace_record.id, "label": label},
+    )
+
+    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+
+
+def handle_enter_federation_code(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Open a modal for the admin to paste a federation code."""
+    if not constants.FEDERATION_ENABLED:
+        return
+
+    trigger_id = helpers.safe_get(body, "trigger_id")
+    if not trigger_id:
+        return
+
+    blocks = [
+        orm.InputBlock(
+            label="Paste the connection code from the remote SyncBot instance",
+            action=actions.CONFIG_FEDERATION_CODE_INPUT,
+            element=orm.PlainTextInputElement(
+                placeholder="Paste the full code here...",
+                multiline=True,
+            ),
+        ),
+    ]
+
+    view = orm.BlockView(blocks=blocks)
+    client.views_open(
+        trigger_id=trigger_id,
+        view={
+            "type": "modal",
+            "callback_id": actions.CONFIG_FEDERATION_CODE_SUBMIT,
+            "title": {"type": "plain_text", "text": "Enter Connection Code"},
+            "submit": {"type": "plain_text", "text": "Connect"},
+            "close": {"type": "plain_text", "text": "Cancel"},
+            "blocks": view.as_form_field(),
+        },
+    )
+
+
+def handle_federation_code_submit(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Process a submitted federation code and initiate cross-instance connection."""
+    if not constants.FEDERATION_ENABLED:
+        return
+
+    team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id")
+    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
+    if not workspace_record:
+        return
+
+    values = helpers.safe_get(body, "view", "state", "values") or {}
+    code_text = ""
+    for _block_id, block_data in 
values.items(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_FEDERATION_CODE_INPUT: + code_text = (action_data.get("value") or "").strip() + + if not code_text: + _logger.warning("federation_code_submit: empty code") + return + + payload = federation.parse_federation_code(code_text) + if not payload: + _logger.warning("federation_code_submit: invalid code format") + return + + remote_url = payload["webhook_url"] + remote_code = payload["code"] + remote_instance_id = payload["instance_id"] + + result = federation.initiate_federation_connect( + remote_url, + remote_code, + team_id=workspace_record.team_id, + workspace_name=workspace_record.workspace_name or None, + ) + if not result or not result.get("ok"): + _logger.error( + "federation_connect_failed", + extra={"remote_url": remote_url, "result": result}, + ) + return + + remote_public_key = result.get("public_key", "") + + fed_ws = federation.get_or_create_federated_workspace( + instance_id=remote_instance_id, + webhook_url=remote_url, + public_key=remote_public_key, + name=f"Connection {remote_instance_id[:8]}", + ) + + now = datetime.now(UTC) + group = schemas.WorkspaceGroup( + name=f"Federation — {fed_ws.name}", + invite_code=f"FED-{secrets.token_hex(4).upper()}", + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(group) + + local_member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(local_member) + + fed_member = schemas.WorkspaceGroupMember( + group_id=group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(fed_member) + + _logger.info( + "federation_connection_established", + extra={ + "workspace_id": workspace_record.id, + "remote_instance": remote_instance_id, + "federated_workspace_id": fed_ws.id, + 
"group_id": group.id, + }, + ) + + _exchange_user_directory(fed_ws, workspace_record) + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_remove_federation_connection( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Remove a federation connection (group membership).""" + action_data = helpers.safe_get(body, "actions", 0) or {} + action_id: str = action_data.get("action_id", "") + member_id_str = action_id.replace(f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_", "") + + try: + member_id = int(member_id_str) + except (TypeError, ValueError): + _logger.warning("remove_federation_connection_invalid_id", extra={"action_id": action_id}) + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member: + return + + from datetime import UTC, datetime + now = datetime.now(UTC) + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + { + schemas.WorkspaceGroupMember.status: "inactive", + schemas.WorkspaceGroupMember.deleted_at: now, + }, + ) + + _logger.info("federation_connection_removed", extra={"member_id": member_id}) + + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if workspace_record: + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/handlers/group_manage.py b/syncbot/handlers/group_manage.py new file mode 100644 index 0000000..3e8aabd --- /dev/null +++ b/syncbot/handlers/group_manage.py @@ -0,0 +1,210 @@ +"""Group management handlers — leave group with confirmation.""" + +import logging +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import helpers +from db import DbManager, schemas +from slack import actions, orm + +_logger = 
logging.getLogger(__name__) + + +def handle_leave_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Show a confirmation modal before leaving a workspace group.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "leave_group"}) + return + + action_data = helpers.safe_get(body, "actions", 0) or {} + action_id: str = action_data.get("action_id", "") + group_id_str = action_id.replace(f"{actions.CONFIG_LEAVE_GROUP}_", "") + + try: + group_id = int(group_id_str) + except (TypeError, ValueError): + _logger.warning("leave_group_invalid_id", extra={"action_id": action_id}) + return + + groups = DbManager.find_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + if not groups: + return + group = groups[0] + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + confirm_form = orm.BlockView( + blocks=[ + orm.SectionBlock( + label=( + f":warning: *Are you sure you want to leave the group \"{group.name}\"?*\n\n" + "This will:\n" + "\u2022 Stop all channel syncs you have in this group\n" + "\u2022 Remove your synced message history from this group\n" + "\u2022 Remove your user mappings for this group\n\n" + "_Other members will continue syncing uninterrupted._" + ), + ), + ] + ) + + confirm_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_LEAVE_GROUP_CONFIRM, + title_text="Leave Group", + submit_button_text="Leave", + close_button_text="Cancel", + parent_metadata={"group_id": group_id}, + ) + + +def handle_leave_group_confirm( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Execute group departure after confirmation. 
+ + - Soft-deletes the membership record + - Removes this workspace's SyncChannels (and their PostMeta) for group syncs + - Leaves all affected Slack channels + - Cleans up syncs this workspace published (if all subscribers are gone) + - Removes user mappings scoped to this group + - Notifies remaining group members + """ + from handlers._common import _parse_private_metadata + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "leave_group_confirm"}) + return + + meta = _parse_private_metadata(body) + group_id = meta.get("group_id") + if not group_id: + _logger.warning("leave_group_confirm: missing group_id in metadata") + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + groups = DbManager.find_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + if not groups: + return + group = groups[0] + + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + if not members: + _logger.warning("leave_group_confirm: not a member", extra={"group_id": group_id}) + return + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + syncs_in_group = DbManager.find_records(schemas.Sync, [schemas.Sync.group_id == group_id]) + + for sync in syncs_in_group: + my_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync.id, + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for ch in 
my_channels: + DbManager.delete_records(schemas.PostMeta, [schemas.PostMeta.sync_channel_id == ch.id]) + DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == ch.id]) + try: + client.conversations_leave(channel=ch.channel_id) + except Exception as e: + _logger.warning(f"Failed to leave channel {ch.channel_id}: {e}") + + if sync.publisher_workspace_id == workspace_record.id: + remaining = DbManager.find_records( + schemas.SyncChannel, + [schemas.SyncChannel.sync_id == sync.id, schemas.SyncChannel.deleted_at.is_(None)], + ) + if not remaining: + DbManager.delete_records(schemas.Sync, [schemas.Sync.id == sync.id]) + + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.group_id == group_id, + ( + (schemas.UserMapping.source_workspace_id == workspace_record.id) + | (schemas.UserMapping.target_workspace_id == workspace_record.id) + ), + ], + ) + + from datetime import UTC, datetime + now = datetime.now(UTC) + for m in members: + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == m.id], + { + schemas.WorkspaceGroupMember.status: "inactive", + schemas.WorkspaceGroupMember.deleted_at: now, + }, + ) + + _logger.info( + "group_left", + extra={"workspace_id": workspace_record.id, "group_id": group_id, "group_name": group.name}, + ) + + remaining_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + if not remaining_members: + DbManager.delete_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + _logger.info("group_deleted_empty", extra={"group_id": group_id}) + else: + for m in remaining_members: + if not m.workspace_id: + continue + partner = helpers.get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at: + continue + try: + partner_client = 
WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + helpers.notify_admins_dm( + partner_client, + f":wave: *{admin_label}* left the group *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(partner, logger, context=context) + except Exception as e: + _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py new file mode 100644 index 0000000..5e2683e --- /dev/null +++ b/syncbot/handlers/groups.py @@ -0,0 +1,775 @@ +"""Workspace group handlers — create, join, accept, cancel.""" + +import contextlib +import logging +import secrets +import string +from datetime import UTC, datetime, timedelta +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import helpers +from db import DbManager, schemas +from slack import actions, forms, orm +from slack.blocks import context as block_context, divider, section + +_logger = logging.getLogger(__name__) + +_INVITE_CODE_CHARS = string.ascii_uppercase + string.digits + + +def _generate_invite_code(length: int = 7) -> str: + """Generate a random alphanumeric invite code like ``A7X-K9M``.""" + raw = "".join(secrets.choice(_INVITE_CODE_CHARS) for _ in range(length)) + return f"{raw[:3]}-{raw[3:]}" if length >= 6 else raw + + +def _activate_group_membership( + client: WebClient, + workspace_record: "schemas.Workspace", + group: "schemas.WorkspaceGroup", +) -> None: + """Refresh user directories and seed mappings for all existing group members.""" + try: + helpers._refresh_user_directory(client, workspace_record.id) + except Exception as e: + _logger.warning(f"Failed to refresh user directory for workspace {workspace_record.id}: {e}") + + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + 
schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + + partner_clients: list[tuple[WebClient, int]] = [] + + for member in members: + if not member.workspace_id: + continue + partner = helpers.get_workspace_by_id(member.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at: + continue + + try: + partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + helpers._refresh_user_directory(partner_client, partner.id) + partner_clients.append((partner_client, partner.id)) + except Exception as e: + _logger.warning(f"Failed to refresh user directory for workspace {partner.id}: {e}") + + try: + helpers.seed_user_mappings(workspace_record.id, partner.id, group_id=group.id) + helpers.seed_user_mappings(partner.id, workspace_record.id, group_id=group.id) + except Exception as e: + _logger.warning(f"Failed to seed user mappings: {e}") + + try: + helpers.run_auto_match_for_workspace(client, workspace_record.id) + except Exception as e: + _logger.warning(f"Auto-match failed for workspace {workspace_record.id}: {e}") + + for p_client, p_id in partner_clients: + try: + helpers.run_auto_match_for_workspace(p_client, p_id) + except Exception as e: + _logger.warning(f"Auto-match failed for partner workspace {p_id}: {e}") + + +def handle_create_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for naming a new workspace group.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "create_group"}) + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + view = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Group Name", + action=actions.CONFIG_CREATE_GROUP_NAME, + element=orm.PlainTextInputElement(placeholder="e.g. 
East Coast AOs, Partner Org..."), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Give this group a friendly name. An invite code will be generated " + "that other workspace admins can use to join.", + ), + ), + ] + ) + + view.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_CREATE_GROUP_SUBMIT, + title_text="Create Group", + submit_button_text="Create", + ) + + +def handle_create_group_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Create the workspace group and add this workspace as the creator.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "create_group_submit"}) + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + group_name = "" + for block_data in values.values(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_CREATE_GROUP_NAME: + group_name = (action_data.get("value") or "").strip() + + if not group_name: + _logger.warning("create_group_submit: empty group name") + return + + if len(group_name) > 100: + group_name = group_name[:100] + + code = _generate_invite_code() + now = datetime.now(UTC) + + group = schemas.WorkspaceGroup( + name=group_name, + invite_code=code, + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(group) + + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(member) + + _logger.info( + "group_created", + extra={ + "workspace_id": 
workspace_record.id, + "group_id": group.id, + "group_name": group_name, + "invite_code": code, + }, + ) + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + if acting_user_id: + try: + dm = client.conversations_open(users=[acting_user_id]) + dm_channel = helpers.safe_get(dm, "channel", "id") + if dm_channel: + client.chat_postMessage( + channel=dm_channel, + text=f":white_check_mark: *Group Created* — *{group_name}*\n\n" + f"Share this invite code with admins of other workspaces:\n\n`{code}`", + ) + except Exception as e: + _logger.warning(f"Failed to DM invite code: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_join_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for entering a group invite code.""" + import copy + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_group"}) + return + + trigger_id = helpers.safe_get(body, "trigger_id") + enter_form = copy.deepcopy(forms.ENTER_GROUP_CODE_FORM) + enter_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_JOIN_GROUP_SUBMIT, + title_text="Join Group", + new_or_add="new", + ) + + +def handle_join_group_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Validate an invite code and join the workspace group.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_group_submit"}) + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + form_data = 
forms.ENTER_GROUP_CODE_FORM.get_selected_values(body) + raw_code = (helpers.safe_get(form_data, actions.CONFIG_JOIN_GROUP_CODE) or "").strip().upper() + + if "-" not in raw_code and len(raw_code) >= 6: + raw_code = f"{raw_code[:3]}-{raw_code[3:]}" + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + + rate_key = f"group_join_attempts:{workspace_record.id}" + attempts = helpers._cache_get(rate_key) or 0 + if attempts >= 5: + _logger.warning("group_join_rate_limited", extra={"workspace_id": workspace_record.id}) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + groups = DbManager.find_records( + schemas.WorkspaceGroup, + [ + schemas.WorkspaceGroup.invite_code == raw_code, + schemas.WorkspaceGroup.status == "active", + ], + ) + + if not groups: + helpers._cache_set(rate_key, attempts + 1, ttl=900) + _logger.warning("group_code_invalid", extra={"code": raw_code}) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + group = groups[0] + + if group.created_by_workspace_id == workspace_record.id: + _logger.warning("group_self_join", extra={"workspace_id": workspace_record.id}) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + existing = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + if existing: + _logger.info("group_already_member", extra={"workspace_id": workspace_record.id, "group_id": group.id}) + builders.build_home_tab(body, client, logger, context, user_id=acting_user_id) + return + + now = datetime.now(UTC) + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(member) + + _logger.info( + 
"group_joined", + extra={ + "workspace_id": workspace_record.id, + "group_id": group.id, + "group_name": group.name, + }, + ) + + _activate_group_membership(client, workspace_record, group) + + ws_name = helpers.resolve_workspace_name(workspace_record) + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + other_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + for m in other_members: + if not m.workspace_id: + continue + partner = helpers.get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at: + continue + try: + partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + helpers.notify_admins_dm( + partner_client, + f":handshake: *{admin_label}* joined the group *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(partner, logger, context=context) + except Exception as e: + _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +# --------------------------------------------------------------------------- +# Invite workspace to group +# --------------------------------------------------------------------------- + + +def handle_invite_workspace( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for inviting a workspace to a group.""" + import constants + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + trigger_id = helpers.safe_get(body, "trigger_id") + raw_group_id = helpers.safe_get(body, "actions", 0, "value") + try: + group_id = int(raw_group_id) + except 
(TypeError, ValueError): + _logger.warning(f"invite_workspace: invalid group_id: {raw_group_id!r}") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=group_id) + if not group: + return + + current_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.group_id == group_id], + ) + member_ws_ids = {m.workspace_id for m in current_members if m.workspace_id} + + all_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.deleted_at.is_(None)], + ) + eligible = [ws for ws in all_workspaces if ws.id not in member_ws_ids and ws.bot_token] + + modal_blocks: list = [] + + if eligible: + ws_options = [ + orm.SelectorOption( + name=helpers.resolve_workspace_name(ws), + value=str(ws.id), + ) + for ws in eligible + ] + modal_blocks.append( + orm.InputBlock( + label="Send a direct invite", + action=actions.CONFIG_INVITE_WORKSPACE_SELECT, + element=orm.StaticSelectElement( + placeholder="Select a workspace", + options=ws_options, + ), + optional=True, + ) + ) + modal_blocks.append( + block_context( + "A DM will be sent to the workspace's admins " + "with an invitation to join this group.", + ) + ) + + modal_blocks.append(divider()) + modal_blocks.append(section(":memo: *Invite Code*")) + modal_blocks.append( + block_context( + "Share this code with an admin from another workspace:" + f"\n\n`{group.invite_code}`" + ) + ) + + if constants.FEDERATION_ENABLED: + modal_blocks.append(divider()) + modal_blocks.append(section(":globe_with_meridians: *External Workspace*")) + modal_blocks.append( + block_context( + "For workspaces running their own SyncBot instance, " + f"share this code for them to join externally:\n\n`{group.invite_code}`" + ) + ) + + submit_text = "Send Invite" if eligible else "None" + view = orm.BlockView(blocks=modal_blocks) + view.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_INVITE_WORKSPACE_SUBMIT, + title_text="Invite Workspace", + 
submit_button_text=submit_text, + parent_metadata={"group_id": group_id}, + new_or_add="new", + ) + + +def handle_invite_workspace_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Send a DM invite to admins of the selected workspace.""" + import json as _json + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + metadata = helpers.safe_get(body, "view", "private_metadata") + try: + meta = _json.loads(metadata) if metadata else {} + except (ValueError, TypeError): + meta = {} + group_id = meta.get("group_id") + if not group_id: + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=group_id) + if not group: + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + selected_ws_id = None + for block_data in values.values(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_INVITE_WORKSPACE_SELECT: + sel = action_data.get("selected_option") + if sel: + selected_ws_id = sel.get("value") + + if not selected_ws_id: + return + + try: + target_ws_id = int(selected_ws_id) + except (TypeError, ValueError): + return + + target_ws = helpers.get_workspace_by_id(target_ws_id) + if not target_ws or not target_ws.bot_token or target_ws.deleted_at: + _logger.warning(f"invite_workspace_submit: target workspace {target_ws_id} not available") + return + + existing = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.workspace_id == target_ws_id, + ], + ) + if existing: + _logger.info(f"invite_workspace_submit: workspace {target_ws_id} already in group {group_id}") + builders.refresh_home_tab_for_workspace(workspace_record, logger, 
context=context) + return + + member = schemas.WorkspaceGroupMember( + group_id=group_id, + workspace_id=target_ws_id, + status="pending", + role="member", + joined_at=None, + ) + DbManager.create_record(member) + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + target_client = WebClient(token=helpers.decrypt_bot_token(target_ws.bot_token)) + + invite_blocks = [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": f":handshake: *{admin_label}* has invited your workspace " + f"to join the group *{group.name}*.", + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Accept"}, + "style": "primary", + "action_id": f"{actions.CONFIG_ACCEPT_GROUP_REQUEST}_{member.id}", + "value": str(member.id), + }, + { + "type": "button", + "text": {"type": "plain_text", "text": "Decline"}, + "style": "danger", + "action_id": f"{actions.CONFIG_DECLINE_GROUP_REQUEST}_{member.id}", + "value": str(member.id), + }, + ], + }, + ] + + dm_entries = helpers.notify_admins_dm_blocks( + target_client, + f"{admin_label} has invited your workspace to join the group {group.name}.", + invite_blocks, + ) + helpers.save_dm_messages_to_group_member(member.id, dm_entries) + + _logger.info( + "group_invite_sent", + extra={ + "group_id": group_id, + "target_workspace_id": target_ws_id, + "member_id": member.id, + }, + ) + + builders.refresh_home_tab_for_workspace(target_ws, logger, context=context) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +# --------------------------------------------------------------------------- +# Accept / Decline group invite +# --------------------------------------------------------------------------- + + +def handle_accept_group_invite( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Accept a pending group 
invite from a DM button.""" + raw_member_id = helpers.safe_get(body, "actions", 0, "value") + try: + member_id = int(raw_member_id) + except (TypeError, ValueError): + _logger.warning(f"accept_group_invite: invalid member_id: {raw_member_id!r}") + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member or member.status != "pending": + _logger.info(f"accept_group_invite: member {member_id} not pending") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=member.group_id) + if not group: + return + + workspace_record = helpers.get_workspace_by_id(member.workspace_id) + if not workspace_record: + return + + now = datetime.now(UTC) + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + { + schemas.WorkspaceGroupMember.status: "active", + schemas.WorkspaceGroupMember.joined_at: now, + }, + ) + + _activate_group_membership(client, workspace_record, group) + + _update_invite_dms( + member, + workspace_record, + f":white_check_mark: Your workspace has joined the group *{group.name}*.", + ) + + other_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + ws_name = helpers.resolve_workspace_name(workspace_record) + for m in other_members: + if not m.workspace_id: + continue + partner = helpers.get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at: + continue + try: + partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + helpers.notify_admins_dm( + partner_client, + f":handshake: *{ws_name}* has joined the group *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(partner, logger, context=context) + except Exception as e: + 
_logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + + _logger.info( + "group_invite_accepted", + extra={ + "member_id": member_id, + "group_id": group.id, + "workspace_id": workspace_record.id, + }, + ) + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_decline_group_invite( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Decline a pending group invite from a DM button.""" + raw_member_id = helpers.safe_get(body, "actions", 0, "value") + try: + member_id = int(raw_member_id) + except (TypeError, ValueError): + _logger.warning(f"decline_group_invite: invalid member_id: {raw_member_id!r}") + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member or member.status != "pending": + _logger.info(f"decline_group_invite: member {member_id} not pending") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=member.group_id) + group_name = group.name if group else "the group" + + target_ws = helpers.get_workspace_by_id(member.workspace_id) if member.workspace_id else None + + _update_invite_dms( + member, + target_ws, + f":x: The invitation to join *{group_name}* was declined.", + ) + + group_id = member.group_id + + DbManager.delete_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + ) + + _logger.info( + "group_invite_declined", + extra={"member_id": member_id, "group_id": group_id}, + ) + + all_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in all_members: + if not m.workspace_id: + continue + partner = helpers.get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at: + continue + with contextlib.suppress(Exception): + 
builders.refresh_home_tab_for_workspace(partner, logger, context=context) + + +def _update_invite_dms( + member: schemas.WorkspaceGroupMember, + workspace: schemas.Workspace | None, + new_text: str, +) -> None: + """Replace the original invite DM content with an updated message so the invite + is removed and replaced by the success message (e.g. workspace joined the group). + """ + import json as _json + + if not member.dm_messages: + _logger.debug("_update_invite_dms: no dm_messages on member %s", member.id) + return + if not workspace or not workspace.bot_token: + return + + try: + entries = _json.loads(member.dm_messages) + except (ValueError, TypeError): + _logger.warning("_update_invite_dms: invalid dm_messages JSON for member %s", member.id) + return + + if not entries: + return + + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": new_text}}] + for entry in entries: + ch = entry.get("channel") + ts = entry.get("ts") + if not ch or ts is None: + continue + ts_str = str(ts).strip() + if not ts_str: + continue + try: + ws_client.chat_update( + channel=ch, + ts=ts_str, + text=new_text, + blocks=blocks, + ) + except Exception as e: + _logger.warning( + "_update_invite_dms: failed to update DM channel=%s ts=%s: %s", + ch, + ts_str, + e, + ) diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py new file mode 100644 index 0000000..4195ddf --- /dev/null +++ b/syncbot/handlers/messages.py @@ -0,0 +1,704 @@ +"""Message sync handlers — new posts, replies, edits, deletes, reactions.""" + +import logging +import uuid +from logging import Logger + +from slack_sdk.web import WebClient + +import constants +import federation +import helpers +from db import DbManager, schemas +from handlers._common import EventContext +from logger import emit_metric +from slack import orm + + +def _find_source_workspace_id(records: list[tuple], channel_id: str, ws_index: int = 1) -> 
int | None:
+    """Return the workspace ID from the record whose channel matches *channel_id*."""
+    # Record layouts: (sync_channel, workspace) with ws_index=1, or
+    # (post_meta, sync_channel, workspace) with ws_index=2; the sync_channel
+    # always sits immediately before the workspace element.
+    for rec in records:
+        sc = rec[ws_index - 1] if ws_index > 1 else rec[0]
+        ws = rec[ws_index]
+        if sc.channel_id == channel_id:
+            return ws.id
+    return None
+
+_logger = logging.getLogger(__name__)
+
+
+def _parse_event_fields(body: dict, client: WebClient) -> EventContext:
+    """Extract the common fields every message handler needs."""
+    event: dict = body.get("event", {})
+    msg_text: str = helpers.safe_get(event, "text") or helpers.safe_get(event, "message", "text")
+    # NOTE(review): empty text is normalized to a single space, so downstream
+    # comparisons against "" never match — use .strip() checks instead.
+    msg_text = msg_text if msg_text else " "
+
+    return EventContext(
+        team_id=helpers.safe_get(body, "team_id"),
+        channel_id=helpers.safe_get(event, "channel"),
+        user_id=(helpers.safe_get(event, "user") or helpers.safe_get(event, "message", "user")),
+        msg_text=msg_text,
+        mentioned_users=helpers.parse_mentioned_users(msg_text, client),
+        thread_ts=helpers.safe_get(event, "thread_ts"),
+        # For edits/deletes the original message ts lives under message/previous_message.
+        ts=(
+            helpers.safe_get(event, "message", "ts")
+            or helpers.safe_get(event, "previous_message", "ts")
+            or helpers.safe_get(event, "ts")
+        ),
+        event_subtype=helpers.safe_get(event, "subtype"),
+    )
+
+
+def _build_file_context(body: dict, client: WebClient, logger: Logger) -> tuple[list[dict], list[dict], list[dict]]:
+    """Process files attached to a message event.
+
+    Returns ``(s3_photo_list, photo_blocks, direct_files)`` where:
+
+    * *s3_photo_list* — files uploaded to S3 (have a ``path`` key for
+      cleanup after syncing).
+    * *photo_blocks* — Slack Block Kit ``image`` blocks for S3-hosted
+      images, ready to include in ``chat.postMessage``.
+    * *direct_files* — files downloaded to ``/tmp`` that should be
+      uploaded directly to each target channel via
+      ``files_upload_v2``. 
+    """
+    event = body.get("event", {})
+    # Cap at 20 files per event to bound upload work.
+    files = (helpers.safe_get(event, "files") or helpers.safe_get(event, "message", "files") or [])[:20]
+    event_subtype = helpers.safe_get(event, "subtype")
+
+    images = [f for f in files if f.get("mimetype", "").startswith("image")]
+    videos = [f for f in files if f.get("mimetype", "").startswith("video")]
+
+    s3_photo_list: list[dict] = []
+    photo_blocks: list[dict] = []
+    direct_files: list[dict] = []
+
+    is_edit = event_subtype in ("message_changed", "message_deleted")
+
+    if constants.S3_ENABLED:
+        if is_edit:
+            # On edits the files were already uploaded; reconstruct the S3 names
+            # (HEIC was converted to PNG at upload time) instead of re-uploading.
+            photo_names = [
+                f"{p['id']}.png" if p.get("filetype") == "heic" else f"{p['id']}.{p.get('filetype', 'png')}"
+                for p in images
+            ]
+            s3_photo_list = [{"url": f"{constants.S3_IMAGE_URL}{name}", "name": name} for name in photo_names]
+        else:
+            s3_photo_list = helpers.upload_photos(files=images, client=client, logger=logger)
+
+        photo_blocks = [orm.ImageBlock(image_url=p["url"], alt_text=p["name"]).as_form_field() for p in s3_photo_list]
+
+        if constants.S3_VIDEO_ENABLED and not is_edit:
+            s3_photo_list.extend(helpers.upload_photos(files=videos, client=client, logger=logger))
+        elif not is_edit:
+            direct_files.extend(helpers.download_slack_files(videos, client, logger))
+    else:
+        if not is_edit:
+            direct_files = helpers.download_slack_files(images + videos, client, logger)
+
+    # Handle GIFs/images from attachments (e.g. GIPHY bot, Slack GIF picker,
+    # unfurled URLs) when no file attachments are present. We always use
+    # image blocks for these since the URLs are publicly accessible — this
+    # avoids a download/re-upload round-trip and gives us a proper message
+    # ts for PostMeta so reactions work correctly. 
+    if not files and not is_edit:
+        attachments = event.get("attachments") or helpers.safe_get(event, "message", "attachments") or []
+        for att in attachments:
+            img_url = att.get("image_url") or att.get("thumb_url")
+
+            # Slack's built-in GIF picker nests the image inside blocks
+            if not img_url:
+                for blk in att.get("blocks") or []:
+                    if blk.get("type") == "image" and blk.get("image_url"):
+                        img_url = blk["image_url"]
+                        break
+
+            # Also check top-level event blocks for image blocks
+            if not img_url:
+                for blk in event.get("blocks") or []:
+                    if blk.get("type") == "image" and blk.get("image_url"):
+                        img_url = blk["image_url"]
+                        break
+
+            if not img_url:
+                _logger.info(
+                    "attachment_no_image_url", extra={"att_keys": list(att.keys()), "fallback": att.get("fallback")}
+                )
+                continue
+
+            name = att.get("fallback") or "attachment.gif"
+            photo_blocks.append(orm.ImageBlock(image_url=img_url, alt_text=name).as_form_field())
+
+    return s3_photo_list, photo_blocks, direct_files
+
+
+def _get_workspace_name(records: list, channel_id: str, workspace_index: int) -> str | None:
+    """Pull the workspace name for the originating channel from a record list."""
+    return helpers.safe_get(
+        [r[workspace_index].workspace_name for r in records if r[workspace_index - 1].channel_id == channel_id],
+        0,
+    )
+
+
+def _handle_new_post(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    ctx: EventContext,
+    photo_list: list[dict],
+    photo_blocks: list[dict],
+    direct_files: list[dict] | None = None,
+) -> None:
+    """Sync a brand-new top-level message to all linked channels."""
+    team_id = ctx["team_id"]
+    channel_id = ctx["channel_id"]
+    msg_text = ctx["msg_text"]
+    mentioned_users = ctx["mentioned_users"]
+    user_id = ctx["user_id"]
+
+    sync_records = helpers.get_sync_list(team_id, channel_id)
+    if not sync_records:
+        # Unconfigured channel: announce and leave rather than silently lurking.
+        if user_id:
+            try:
+                client.chat_postMessage(
+                    channel=channel_id,
+                    text=":wave: Hello! I'm SyncBot. 
I was added to this channel, but this channel "
+                    "doesn't seem to be part of a Sync. I'm leaving now. Please open the SyncBot Home "
+                    "tab to configure me.",
+                )
+                client.conversations_leave(channel=channel_id)
+            except Exception as e:
+                logger.error(f"Failed to notify and leave unconfigured channel {channel_id}: {e}")
+        return
+
+    if user_id:
+        user_name, user_profile_url = helpers.get_user_info(client, user_id)
+    else:
+        # No user on the event — message came from a bot/app; use its identity.
+        user_name, user_profile_url = helpers.get_bot_info_from_event(body)
+
+    workspace_name = _get_workspace_name(sync_records, channel_id, workspace_index=1)
+    posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)"
+
+    # One post_id groups the source message and every re-post so later
+    # edits/deletes/reactions can find all copies.
+    post_uuid = uuid.uuid4().hex
+    post_list: list[schemas.PostMeta] = []
+
+    source_workspace_id = _find_source_workspace_id(sync_records, channel_id)
+
+    fed_ws = None
+    if sync_records:
+        fed_ws = helpers.get_federated_workspace_for_sync(sync_records[0][0].sync_id)
+
+    for sync_channel, workspace in sync_records:
+        try:
+            if sync_channel.channel_id == channel_id:
+                # Origin channel: record the original ts, don't re-post.
+                ts = helpers.safe_get(body, "event", "ts")
+            elif fed_ws and workspace.id != source_workspace_id:
+                # Federated workspace: push via the federation API instead of Slack.
+                image_payloads = []
+                for block in photo_blocks or []:
+                    if block.get("type") == "image":
+                        image_payloads.append(
+                            {
+                                "url": block.get("image_url", ""),
+                                "alt_text": block.get("alt_text", "Shared image"),
+                            }
+                        )
+                payload = federation.build_message_payload(
+                    sync_id=sync_channel.sync_id,
+                    post_id=post_uuid,
+                    channel_id=sync_channel.channel_id,
+                    user_name=user_name,
+                    user_avatar_url=user_profile_url,
+                    workspace_name=workspace_name,
+                    text=msg_text,
+                    images=image_payloads,
+                    timestamp=helpers.safe_get(body, "event", "ts"),
+                )
+                result = federation.push_message(fed_ws, payload)
+                # Fall back to the origin ts when the remote doesn't return one.
+                ts = helpers.safe_get(result, "ts") if result else helpers.safe_get(body, "event", "ts")
+                if not ts:
+                    ts = helpers.safe_get(body, "event", "ts")
+            else:
+                bot_token = helpers.decrypt_bot_token(workspace.bot_token)
+                target_client = WebClient(token=bot_token)
+                
adapted_text = helpers.apply_mentioned_users(
+                    msg_text,
+                    client,
+                    target_client,
+                    mentioned_users,
+                    source_workspace_id=source_workspace_id or 0,
+                    target_workspace_id=workspace.id,
+                )
+                source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None
+                adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws)
+
+                target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message(
+                    user_id or "",
+                    source_workspace_id or 0,
+                    user_name,
+                    user_profile_url,
+                    target_client,
+                    workspace.id,
+                )
+                name_for_target = target_display_name or user_name or "Someone"
+
+                # File-only message: upload directly with a "Shared by" caption.
+                if direct_files and not msg_text.strip():
+                    _, file_ts = helpers.upload_files_to_slack(
+                        bot_token=bot_token,
+                        channel_id=sync_channel.channel_id,
+                        files=direct_files,
+                        initial_comment=f"Shared by {name_for_target} {posted_from}",
+                    )
+                    ts = file_ts or helpers.safe_get(body, "event", "ts")
+                else:
+                    res = helpers.post_message(
+                        bot_token=bot_token,
+                        channel_id=sync_channel.channel_id,
+                        msg_text=adapted_text,
+                        user_name=name_for_target,
+                        user_profile_url=target_icon_url or user_profile_url,
+                        workspace_name=workspace_name,
+                        blocks=photo_blocks,
+                    )
+                    ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts")
+
+                    # Text + files: attach the files as a thread under the synced post.
+                    if direct_files:
+                        helpers.upload_files_to_slack(
+                            bot_token=bot_token,
+                            channel_id=sync_channel.channel_id,
+                            files=direct_files,
+                            thread_ts=ts,
+                        )
+
+            if ts:
+                post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts)))
+        except Exception as exc:
+            # One failed target must not block syncing to the others.
+            _logger.error(f"Failed to sync new post to channel {sync_channel.channel_id}: {exc}")
+
+    synced = len(post_list)
+    failed = len(sync_records) - synced
+    emit_metric("messages_synced", value=synced, sync_type="new_post")
+    if failed:
+        emit_metric("sync_failures", value=failed, sync_type="new_post")
+
+    helpers.cleanup_temp_files(photo_list, direct_files)
+
+    if post_list:
+        
DbManager.create_records(post_list)
+
+
+def _handle_thread_reply(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    ctx: EventContext,
+    photo_blocks: list[dict],
+    direct_files: list[dict] | None = None,
+) -> None:
+    """Sync a threaded reply to all linked channels."""
+    channel_id = ctx["channel_id"]
+    msg_text = ctx["msg_text"]
+    mentioned_users = ctx["mentioned_users"]
+    user_id = ctx["user_id"]
+    thread_ts = ctx["thread_ts"]
+
+    # Look up the parent post's copies; no records means the parent was never synced.
+    post_records = helpers.get_post_records(thread_ts)
+    if not post_records:
+        return
+
+    workspace_name = _get_workspace_name(post_records, channel_id, workspace_index=2)
+    posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)"
+
+    if user_id:
+        user_name, user_profile_url = helpers.get_user_info(client, user_id)
+    else:
+        user_name, user_profile_url = helpers.get_bot_info_from_event(body)
+
+    post_uuid = uuid.uuid4().hex
+    post_list: list[schemas.PostMeta] = []
+
+    source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2)
+
+    fed_ws = None
+    if post_records:
+        fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id)
+
+    # Parent's post_id lets the federation side thread the reply correctly.
+    thread_post_id = post_records[0][0].post_id if post_records else None
+
+    for post_meta, sync_channel, workspace in post_records:
+        try:
+            if sync_channel.channel_id == channel_id:
+                ts = helpers.safe_get(body, "event", "ts")
+            elif fed_ws and workspace.id != source_workspace_id:
+                payload = federation.build_message_payload(
+                    sync_id=sync_channel.sync_id,
+                    post_id=post_uuid,
+                    channel_id=sync_channel.channel_id,
+                    user_name=user_name,
+                    user_avatar_url=user_profile_url,
+                    workspace_name=workspace_name,
+                    text=msg_text,
+                    thread_post_id=str(thread_post_id) if thread_post_id else None,
+                    timestamp=helpers.safe_get(body, "event", "ts"),
+                )
+                result = federation.push_message(fed_ws, payload)
+                ts = helpers.safe_get(result, "ts") if result else helpers.safe_get(body, "event", "ts")
+                if not ts:
+                    ts = helpers.safe_get(body, "event", "ts")
+            else:
+                
bot_token = helpers.decrypt_bot_token(workspace.bot_token)
+                target_client = WebClient(token=bot_token)
+                adapted_text = helpers.apply_mentioned_users(
+                    msg_text,
+                    client,
+                    target_client,
+                    mentioned_users,
+                    source_workspace_id=source_workspace_id or 0,
+                    target_workspace_id=workspace.id,
+                )
+                source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None
+                adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws)
+                # PostMeta.ts is stored as a float; render it back to Slack's
+                # "seconds.microseconds" string form to address the parent message.
+                parent_ts = f"{post_meta.ts:.6f}"
+
+                target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message(
+                    user_id or "",
+                    source_workspace_id or 0,
+                    user_name,
+                    user_profile_url,
+                    target_client,
+                    workspace.id,
+                )
+                name_for_target = target_display_name or user_name or "Someone"
+
+                # File-only reply: upload straight into the parent thread.
+                if direct_files and not msg_text.strip():
+                    _, file_ts = helpers.upload_files_to_slack(
+                        bot_token=bot_token,
+                        channel_id=sync_channel.channel_id,
+                        files=direct_files,
+                        initial_comment=f"Shared by {name_for_target} {posted_from}",
+                        thread_ts=parent_ts,
+                    )
+                    ts = file_ts or helpers.safe_get(body, "event", "ts")
+                else:
+                    res = helpers.post_message(
+                        bot_token=bot_token,
+                        channel_id=sync_channel.channel_id,
+                        msg_text=adapted_text,
+                        user_name=name_for_target,
+                        user_profile_url=target_icon_url or user_profile_url,
+                        thread_ts=parent_ts,
+                        workspace_name=workspace_name,
+                        blocks=photo_blocks,
+                    )
+                    ts = helpers.safe_get(res, "ts")
+
+                    if direct_files:
+                        helpers.upload_files_to_slack(
+                            bot_token=bot_token,
+                            channel_id=sync_channel.channel_id,
+                            files=direct_files,
+                            thread_ts=parent_ts,
+                        )
+
+            if ts:
+                post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts)))
+        except Exception as exc:
+            _logger.error(f"Failed to sync thread reply to channel {sync_channel.channel_id}: {exc}")
+
+    synced = len(post_list)
+    failed = len(post_records) - synced
+    emit_metric("messages_synced", value=synced, sync_type="thread_reply")
+    if failed:
+        
emit_metric("sync_failures", value=failed, sync_type="thread_reply")
+
+    helpers.cleanup_temp_files(None, direct_files)
+
+    if post_list:
+        DbManager.create_records(post_list)
+
+
+def _handle_message_edit(
+    client: WebClient,
+    logger: Logger,
+    ctx: EventContext,
+    photo_blocks: list[dict],
+) -> None:
+    """Propagate an edited message to all linked channels."""
+    channel_id = ctx["channel_id"]
+    msg_text = ctx["msg_text"]
+    mentioned_users = ctx["mentioned_users"]
+    ts = ctx["ts"]
+
+    post_records = helpers.get_post_records(ts)
+    if not post_records:
+        return
+
+    workspace_name = _get_workspace_name(post_records, channel_id, workspace_index=2)
+
+    source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2)
+
+    fed_ws = None
+    if post_records:
+        fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id)
+
+    synced = 0
+    failed = 0
+    for post_meta, sync_channel, workspace in post_records:
+        # The origin copy was edited by the user directly; only update the re-posts.
+        if sync_channel.channel_id == channel_id:
+            continue
+        try:
+            if fed_ws and workspace.id != source_workspace_id:
+                payload = federation.build_edit_payload(
+                    post_id=post_meta.post_id.hex() if isinstance(post_meta.post_id, bytes) else str(post_meta.post_id),
+                    channel_id=sync_channel.channel_id,
+                    text=msg_text,
+                    timestamp=f"{post_meta.ts:.6f}",
+                )
+                federation.push_edit(fed_ws, payload)
+            else:
+                bot_token = helpers.decrypt_bot_token(workspace.bot_token)
+                target_client = WebClient(token=bot_token)
+                adapted_text = helpers.apply_mentioned_users(
+                    msg_text,
+                    client,
+                    target_client,
+                    mentioned_users,
+                    source_workspace_id=source_workspace_id or 0,
+                    target_workspace_id=workspace.id,
+                )
+                source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None
+                adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws)
+                # update_ts switches post_message into chat.update mode — TODO confirm in helpers.
+                helpers.post_message(
+                    bot_token=bot_token,
+                    channel_id=sync_channel.channel_id,
+                    msg_text=adapted_text,
+                    update_ts=f"{post_meta.ts:.6f}",
+                    
workspace_name=workspace_name,
+                    blocks=photo_blocks,
+                )
+            synced += 1
+        except Exception as exc:
+            failed += 1
+            _logger.error(f"Failed to sync message edit to channel {sync_channel.channel_id}: {exc}")
+
+    emit_metric("messages_synced", value=synced, sync_type="message_edit")
+    if failed:
+        emit_metric("sync_failures", value=failed, sync_type="message_edit")
+
+
+def _handle_message_delete(
+    ctx: EventContext,
+    logger: Logger,
+) -> None:
+    """Propagate a deleted message to all linked channels."""
+    channel_id = ctx["channel_id"]
+    ts = ctx["ts"]
+
+    post_records = helpers.get_post_records(ts)
+    if not post_records:
+        return
+
+    fed_ws = None
+    if post_records:
+        fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id)
+
+    source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2)
+
+    synced = 0
+    failed = 0
+    for post_meta, sync_channel, workspace in post_records:
+        # The origin copy was deleted by the user directly; only delete the re-posts.
+        if sync_channel.channel_id == channel_id:
+            continue
+        try:
+            if fed_ws and workspace.id != source_workspace_id:
+                # post_id may come back from the DB as bytes or str/UUID; normalize either way.
+                payload = federation.build_delete_payload(
+                    post_id=post_meta.post_id.hex() if isinstance(post_meta.post_id, bytes) else str(post_meta.post_id),
+                    channel_id=sync_channel.channel_id,
+                    timestamp=f"{post_meta.ts:.6f}",
+                )
+                federation.push_delete(fed_ws, payload)
+            else:
+                helpers.delete_message(
+                    bot_token=helpers.decrypt_bot_token(workspace.bot_token),
+                    channel_id=sync_channel.channel_id,
+                    ts=f"{post_meta.ts:.6f}",
+                )
+            synced += 1
+        except Exception as exc:
+            failed += 1
+            _logger.error(f"Failed to sync message delete to channel {sync_channel.channel_id}: {exc}")
+
+    emit_metric("messages_synced", value=synced, sync_type="message_delete")
+    if failed:
+        emit_metric("sync_failures", value=failed, sync_type="message_delete")
+
+
+def _handle_reaction(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Sync a reaction to all linked channels as a threaded message.
+
+    Posts a short message (e.g. 
"reacted with :thumbsup: to ") using + the same Display Name (Workspace Name) impersonation used for synced + messages. The message is always threaded under the top-level synced + message, with a permalink to the exact message that was reacted to. + Only ``reaction_added`` events are synced. + """ + event = body.get("event", {}) + reaction = event.get("reaction") + user_id = event.get("user") + item = event.get("item", {}) + item_type = item.get("type") + channel_id = item.get("channel") + msg_ts = item.get("ts") + event_type = event.get("type") + + if event_type != "reaction_added": + return + + if not reaction or not channel_id or not msg_ts or item_type != "message": + return + + own_user_id = helpers.get_own_bot_user_id(client) + if own_user_id and user_id == own_user_id: + return + + reacted_records = helpers.get_post_records(msg_ts) + if not reacted_records: + _logger.info( + "reaction_no_post_meta", + extra={"msg_ts": msg_ts, "channel_id": channel_id, "float_ts": float(msg_ts)}, + ) + return + + fed_ws = helpers.get_federated_workspace_for_sync(reacted_records[0][1].sync_id) + + source_workspace_id = _find_source_workspace_id(reacted_records, channel_id, ws_index=2) + + user_name, user_profile_url = helpers.get_user_info(client, user_id) if user_id else (None, None) + source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + ws_name = helpers.resolve_workspace_name(source_ws) if source_ws else None + posted_from = f"({ws_name})" if ws_name else "(via SyncBot)" + + synced = 0 + failed = 0 + for post_meta, sync_channel, workspace in reacted_records: + if sync_channel.channel_id == channel_id: + continue + try: + if fed_ws and workspace.id != source_workspace_id: + payload = federation.build_reaction_payload( + post_id=str(post_meta.post_id), + channel_id=sync_channel.channel_id, + reaction=reaction, + action="add", + timestamp=f"{post_meta.ts:.6f}", + ) + federation.push_reaction(fed_ws, payload) + else: + target_client = 
WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + target_msg_ts = f"{post_meta.ts:.6f}" + + target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message( + user_id or "", + source_workspace_id or 0, + user_name, + user_profile_url, + target_client, + workspace.id, + ) + display_name = target_display_name or user_name or user_id or "Someone" + + permalink = None + is_thread_reply = False + try: + plink_resp = target_client.chat_getPermalink( + channel=sync_channel.channel_id, + message_ts=target_msg_ts, + ) + permalink = helpers.safe_get(plink_resp, "permalink") + is_thread_reply = permalink and "thread_ts=" in permalink + except Exception: + pass + + if is_thread_reply and permalink: + msg_text = f"reacted with :{reaction}: to <{permalink}|this message>" + else: + msg_text = f"reacted with :{reaction}:" + + target_client.chat_postMessage( + channel=sync_channel.channel_id, + text=msg_text, + username=f"{display_name} {posted_from}", + icon_url=target_icon_url or user_profile_url, + thread_ts=target_msg_ts, + unfurl_links=False, + unfurl_media=False, + ) + synced += 1 + except Exception as exc: + failed += 1 + _logger.error(f"Failed to sync reaction to channel {sync_channel.channel_id}: {exc}") + + emit_metric("messages_synced", value=synced, sync_type="reaction_add") + if failed: + emit_metric("sync_failures", value=failed, sync_type="reaction_add") + + +def _is_own_bot_message(body: dict, client: WebClient, context: dict) -> bool: + """Return *True* if the event was generated by SyncBot itself. + + Compares the ``bot_id`` in the event payload against SyncBot's own + bot ID. This replaces the old blanket ``bot_message`` filter so + that messages from *other* bots are synced normally while SyncBot's + own re-posts are still ignored (preventing infinite loops). 
+ """ + event = body.get("event", {}) + event_bot_id = ( + event.get("bot_id") + or helpers.safe_get(event, "message", "bot_id") + or helpers.safe_get(event, "previous_message", "bot_id") + ) + if not event_bot_id: + return False + + own_bot_id = helpers.get_own_bot_id(client, context) + return event_bot_id == own_bot_id + + +def respond_to_message_event( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Dispatch incoming message events to the appropriate sub-handler.""" + ctx = _parse_event_fields(body, client) + event_type = helpers.safe_get(body, "event", "type") + event_subtype = ctx["event_subtype"] + + if event_type != "message": + return + + # Skip messages from SyncBot itself to prevent infinite sync loops. + # Messages from OTHER bots are synced normally. + if _is_own_bot_message(body, client, context): + return + + s3_photo_list, photo_blocks, direct_files = _build_file_context(body, client, logger) + + has_files = bool(photo_blocks or direct_files) + if ( + (not event_subtype) + or event_subtype == "bot_message" + or (event_subtype == "file_share" and (ctx["msg_text"] != "" or has_files)) + ): + if not ctx["thread_ts"]: + _handle_new_post(body, client, logger, ctx, s3_photo_list, photo_blocks, direct_files) + else: + _handle_thread_reply(body, client, logger, ctx, photo_blocks, direct_files) + elif event_subtype == "message_changed": + _handle_message_edit(client, logger, ctx, photo_blocks) + elif event_subtype == "message_deleted": + _handle_message_delete(ctx, logger) diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py new file mode 100644 index 0000000..5c11909 --- /dev/null +++ b/syncbot/handlers/sync.py @@ -0,0 +1,401 @@ +"""Sync management handlers — create, join, remove syncs and Home tab.""" + +import logging +import time +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import helpers +from db import 
DbManager, schemas
+from handlers._common import _sanitize_text
+from slack import actions, forms, orm
+
+_logger = logging.getLogger(__name__)
+
+
+def handle_remove_sync(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+):
+    """Handles the "DeSync" button action by removing the SyncChannel record from the database.
+
+    Requires admin/owner authorization (defense-in-depth).
+    """
+    user_id = helpers.get_user_id_from_body(body)
+    if not user_id or not helpers.is_user_authorized(client, user_id):
+        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "remove_sync"})
+        return
+
+    raw_value = helpers.safe_get(body, "actions", 0, "value")
+    try:
+        sync_channel_id = int(raw_value)
+    except (TypeError, ValueError):
+        _logger.warning(f"Invalid sync_channel_id value: {raw_value!r}")
+        return
+
+    sync_channel_record = DbManager.get_record(schemas.SyncChannel, id=sync_channel_id)
+    if not sync_channel_record:
+        return
+
+    # Ownership check: a workspace may only desync its own channels, even if
+    # a forged action value points at another workspace's record.
+    team_id = helpers.safe_get(body, "team_id")
+    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id) if team_id else None
+    if not workspace_record or sync_channel_record.workspace_id != workspace_record.id:
+        _logger.warning(
+            "ownership_denied",
+            extra={"sync_channel_id": sync_channel_id, "team_id": team_id, "action": "remove_sync"},
+        )
+        return
+
+    # Soft delete: set deleted_at rather than removing the row.
+    DbManager.update_records(
+        schemas.SyncChannel,
+        [schemas.SyncChannel.id == sync_channel_id],
+        {schemas.SyncChannel.deleted_at: datetime.now(UTC)},
+    )
+    try:
+        client.conversations_leave(channel=sync_channel_record.channel_id)
+    except Exception as e:
+        logger.warning(f"Failed to leave channel {sync_channel_record.channel_id}: {e}")
+    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+
+    # Partner workspaces see the sync disappear too — refresh their Home tabs.
+    partner_chs = DbManager.find_records(
+        schemas.SyncChannel,
+        [
+            schemas.SyncChannel.sync_id == sync_channel_record.sync_id,
+            schemas.SyncChannel.deleted_at.is_(None),
+            schemas.SyncChannel.workspace_id != 
workspace_record.id,
+        ],
+    )
+    for p_ch in partner_chs:
+        p_ws = helpers.get_workspace_by_id(p_ch.workspace_id, context=context)
+        if p_ws:
+            builders.refresh_home_tab_for_workspace(p_ws, logger, context=context)
+
+
+def handle_app_home_opened(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handle the ``app_home_opened`` event by publishing the Home tab."""
+    helpers.purge_stale_soft_deletes()
+    builders.build_home_tab(body, client, logger, context)
+
+
+def handle_refresh_home(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handle the Refresh button on the Home tab.
+
+    Uses content hash and cached blocks: full refresh only when data changed.
+    When hash matches and within 60s cooldown, re-publishes with cooldown message.
+    """
+    team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team", "id")
+    user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body)
+    if not team_id or not user_id:
+        return
+
+    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
+    if not workspace_record:
+        return
+
+    # NOTE(review): reaches into builders'/helpers' private API
+    # (_home_tab_content_hash, _CACHE below) — consider a public surface.
+    current_hash = builders._home_tab_content_hash(workspace_record)
+    hash_key = f"home_tab_hash:{team_id}"
+    blocks_key = f"home_tab_blocks:{team_id}:{user_id}"
+    refresh_at_key = f"refresh_at:home:{team_id}:{user_id}"
+
+    action, cached_blocks, remaining = helpers.refresh_cooldown_check(
+        current_hash, hash_key, blocks_key, refresh_at_key
+    )
+    cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60)
+
+    if action == "cooldown" and cached_blocks is not None and remaining is not None:
+        blocks_with_message = helpers.inject_cooldown_message(
+            cached_blocks, builders._REFRESH_BUTTON_BLOCK_INDEX, remaining
+        )
+        client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message})
+        return
+    if action == "cached" and cached_blocks is not None:
+        client.views_publish(user_id=user_id, 
view={"type": "home", "blocks": cached_blocks}) + helpers._cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) + return + + # Full refresh: clear workspace name caches and refresh all workspace names + stale_keys = [k for k in helpers._CACHE if k.startswith("ws_name_refresh:")] + for k in stale_keys: + helpers._CACHE.pop(k, None) + + all_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.deleted_at.is_(None)], + ) + for ws in all_workspaces: + try: + if ws.id == workspace_record.id: + ws_client = client + elif ws.bot_token: + ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + else: + continue + + info = ws_client.team_info() + current_name = info["team"]["name"] + if current_name and current_name != ws.workspace_name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == ws.id], + {schemas.Workspace.workspace_name: current_name}, + ) + _logger.info( + "workspace_name_refreshed", + extra={"workspace_id": ws.id, "new_name": current_name}, + ) + except Exception as e: + ws_label = f"{ws.workspace_name} ({ws.team_id})" + _logger.warning(f"Failed to refresh name for {ws_label}: {e}") + + block_dicts = builders.build_home_tab(body, client, logger, context, user_id=user_id, return_blocks=True) + if block_dicts is None: + return + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + helpers.refresh_after_full(hash_key, blocks_key, refresh_at_key, current_hash, block_dicts) + + +def handle_join_sync_submission( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handles the join sync form submission by appending to the SyncChannel table. + + Requires admin/owner authorization (defense-in-depth). + The bot joins the channel *before* the DB record is created so that + a failed join doesn't leave an orphaned record. 
+ """ + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_sync"}) + return + + form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) + sync_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_SELECT) + channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) + team_id = helpers.safe_get(body, "view", "team_id") + + if not sync_id or not channel_id or not team_id: + logger.warning(f"Rejected join-sync: missing required field (sync_id={sync_id}, channel_id={channel_id})") + return + + workspace_record: schemas.Workspace = DbManager.get_record(schemas.Workspace, id=team_id) + sync_record: schemas.Sync = DbManager.get_record(schemas.Sync, id=sync_id) + + if not workspace_record or not sync_record: + logger.warning("Rejected join-sync: workspace or sync record not found") + return + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + partner_channels: list = [] + try: + client.conversations_join(channel=channel_id) + channel_sync_record = schemas.SyncChannel( + sync_id=sync_id, + channel_id=channel_id, + workspace_id=workspace_record.id, + created_at=datetime.now(UTC), + ) + DbManager.create_record(channel_sync_record) + partner_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.workspace_id != workspace_record.id, + ], + ) + if partner_channels: + p_ch = partner_channels[0] + p_ws = helpers.get_workspace_by_id(p_ch.workspace_id) + partner_ref = helpers.resolve_channel_name(p_ch.channel_id, p_ws) + else: + partner_ref = sync_record.title or "the partner channel" + client.chat_postMessage( + channel=channel_id, + text=f":arrows_counterclockwise: 
*{admin_name}* started syncing this channel with *{partner_ref}*. Messages will be shared automatically.",
+        )
+
+        # Notify every partner channel that a new subscriber joined the sync.
+        local_ref = helpers.resolve_channel_name(channel_id, workspace_record)
+        for p_ch in partner_channels:
+            try:
+                p_ws = helpers.get_workspace_by_id(p_ch.workspace_id)
+                if p_ws and p_ws.bot_token:
+                    p_client = WebClient(token=helpers.decrypt_bot_token(p_ws.bot_token))
+                    p_client.chat_postMessage(
+                        channel=p_ch.channel_id,
+                        text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this channel. Messages will be shared automatically.",
+                    )
+            except Exception as exc:
+                _logger.debug(f"join_sync: failed to notify publisher channel {p_ch.channel_id}: {exc}")
+    except Exception as e:
+        logger.error(f"Failed to join sync channel {channel_id}: {e}")
+
+    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+
+    for p_ch in partner_channels:
+        p_ws = helpers.get_workspace_by_id(p_ch.workspace_id, context=context)
+        if p_ws:
+            builders.refresh_home_tab_for_workspace(p_ws, logger, context=context)
+
+
+def handle_new_sync_submission(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handles the new sync form submission.
+
+    Creates a Sync named after the selected channel, links the channel
+    to the sync, joins the channel, and posts a welcome message.
+    Requires admin/owner authorization (defense-in-depth). 
+    """
+    user_id = helpers.get_user_id_from_body(body)
+    if not user_id or not helpers.is_user_authorized(client, user_id):
+        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "new_sync"})
+        return
+
+    form_data = forms.NEW_SYNC_FORM.get_selected_values(body)
+    channel_id = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_CHANNEL_SELECT)
+    team_id = helpers.safe_get(body, "view", "team_id")
+
+    if not channel_id or not team_id:
+        logger.warning(f"Rejected sync creation: missing field (channel_id={channel_id})")
+        return
+
+    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
+    if not workspace_record:
+        logger.warning("Rejected sync creation: workspace record not found")
+        return
+
+    # The sync takes its title from the channel name; fall back to the raw ID
+    # when conversations_info fails (e.g. missing scope).
+    try:
+        conv_info = client.conversations_info(channel=channel_id)
+        channel_name = helpers.safe_get(conv_info, "channel", "name") or channel_id
+    except Exception as exc:
+        _logger.debug(f"handle_create_sync: conversations_info failed for {channel_id}: {exc}")
+        channel_name = channel_id
+
+    sync_title = _sanitize_text(channel_name)
+    if not sync_title:
+        logger.warning("Rejected sync creation: could not determine channel name")
+        return
+
+    acting_user_id = helpers.safe_get(body, "user", "id") or user_id
+    admin_name, _ = helpers.format_admin_label(client, acting_user_id, workspace_record)
+
+    try:
+        # Join first so a failed join doesn't leave orphaned Sync/SyncChannel rows.
+        client.conversations_join(channel=channel_id)
+        sync_record = schemas.Sync(title=sync_title, description=None)
+        DbManager.create_record(sync_record)
+        channel_sync_record = schemas.SyncChannel(
+            sync_id=sync_record.id,
+            channel_id=channel_id,
+            workspace_id=workspace_record.id,
+            created_at=datetime.now(UTC),
+        )
+        DbManager.create_record(channel_sync_record)
+        client.chat_postMessage(
+            channel=channel_id,
+            text=f":outbox_tray: *{admin_name}* published this channel for syncing. 
Other Workspaces can now subscribe.",
+        )
+    except Exception as e:
+        logger.error(f"Failed to create sync for channel {channel_id}: {e}")
+
+    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+
+
+def handle_member_joined_channel(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handle member_joined_channel: check if SyncBot was added to an untracked channel."""
+    event = body.get("event", {})
+    user_id = event.get("user")
+    channel_id = event.get("channel")
+    team_id = helpers.safe_get(body, "team_id") or event.get("team")
+
+    if not user_id or not channel_id or not team_id:
+        return
+
+    # Only react to SyncBot itself being added, not other members.
+    own_user_id = helpers.get_own_bot_user_id(client)
+    if user_id != own_user_id:
+        return
+
+    sync_records = helpers.get_sync_list(team_id, channel_id)
+    if sync_records:
+        return
+
+    try:
+        client.chat_postMessage(
+            channel=channel_id,
+            text=":wave: Hello! I'm SyncBot. I was added to this channel, but this channel "
+            "doesn't seem to be part of a Sync. I'm leaving now. 
def check_join_sync_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Checks to see if the chosen channel id is already part of a sync.

    Adds a warning block to the Join Sync modal when the selected channel is
    already linked to an active sync, and removes it when it no longer applies.
    """
    view_id = helpers.safe_get(body, "view", "id")
    form_data = forms.JOIN_SYNC_FORM.get_selected_values(body)
    channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT)
    # FIX: "blocks" can be absent, and individual blocks may lack a "block_id"
    # key — the previous block["block_id"] list-comp raised KeyError and broke
    # the modal update. Use a default list and dict.get instead.
    blocks = helpers.safe_get(body, "view", "blocks") or []
    already_warning = any(block.get("block_id") == constants.WARNING_BLOCK for block in blocks)
    sync_channel_records = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.channel_id == channel_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    if sync_channel_records and not already_warning:
        blocks.append(
            orm.SectionBlock(
                action=constants.WARNING_BLOCK,
                label=":warning: :warning: This channel is already part of a Sync! Please choose another channel.",
            ).as_form_field()
        )
        helpers.update_modal(
            blocks=blocks,
            client=client,
            view_id=view_id,
            title_text="Join Sync",
            callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT,
        )
    elif not sync_channel_records and already_warning:
        blocks = [block for block in blocks if block.get("block_id") != constants.WARNING_BLOCK]
        helpers.update_modal(
            blocks=blocks,
            client=client,
            view_id=view_id,
            title_text="Join Sync",
            callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT,
        )
def handle_tokens_revoked(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Handle ``tokens_revoked`` event: a workspace uninstalled the app.

    Soft-deletes the workspace, its group memberships, and its sync channels.
    Notifies partner workspaces in shared groups via admin DMs and channel messages.
    """
    team_id = helpers.safe_get(body, "team_id")
    if not team_id:
        _logger.warning("handle_tokens_revoked: missing team_id")
        return

    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id)
    if not workspace_record:
        _logger.warning("handle_tokens_revoked: unknown workspace", extra={"team_id": team_id})
        return

    now = datetime.now(UTC)
    ws_name = helpers.resolve_workspace_name(workspace_record)
    retention_days = constants.SOFT_DELETE_RETENTION_DAYS

    # Soft-delete the workspace itself.
    DbManager.update_records(
        schemas.Workspace,
        [schemas.Workspace.id == workspace_record.id],
        {schemas.Workspace.deleted_at: now},
    )

    active_memberships = DbManager.find_records(
        schemas.WorkspaceGroupMember,
        [
            schemas.WorkspaceGroupMember.workspace_id == workspace_record.id,
            schemas.WorkspaceGroupMember.status == "active",
            schemas.WorkspaceGroupMember.deleted_at.is_(None),
        ],
    )
    # Soft-delete each active group membership.
    for membership in active_memberships:
        DbManager.update_records(
            schemas.WorkspaceGroupMember,
            [schemas.WorkspaceGroupMember.id == membership.id],
            {schemas.WorkspaceGroupMember.deleted_at: now},
        )

    my_channels = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.workspace_id == workspace_record.id,
            schemas.SyncChannel.deleted_at.is_(None),
        ],
    )
    # Pause and soft-delete every sync channel owned by the departing workspace.
    for channel in my_channels:
        DbManager.update_records(
            schemas.SyncChannel,
            [schemas.SyncChannel.id == channel.id],
            {schemas.SyncChannel.deleted_at: now, schemas.SyncChannel.status: "paused"},
        )

    # Notify each partner workspace at most once across all shared groups.
    notified_ws: set[int] = set()
    for membership in active_memberships:
        partners = DbManager.find_records(
            schemas.WorkspaceGroupMember,
            [
                schemas.WorkspaceGroupMember.group_id == membership.group_id,
                schemas.WorkspaceGroupMember.workspace_id != workspace_record.id,
                schemas.WorkspaceGroupMember.status == "active",
                schemas.WorkspaceGroupMember.deleted_at.is_(None),
            ],
        )
        for member in partners:
            if not member.workspace_id or member.workspace_id in notified_ws:
                continue
            partner = helpers.get_workspace_by_id(member.workspace_id)
            if not partner or not partner.bot_token or partner.deleted_at:
                continue
            notified_ws.add(member.workspace_id)

            try:
                partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token))

                helpers.notify_admins_dm(
                    partner_client,
                    f":double_vertical_bar: *{ws_name}* has uninstalled SyncBot. "
                    f"Syncing has been paused. If they reinstall within {retention_days} days, "
                    "syncing will resume automatically.",
                )

                # Collect this partner's channels that shared a sync with us.
                partner_channel_ids = []
                for channel in my_channels:
                    siblings = DbManager.find_records(
                        schemas.SyncChannel,
                        [
                            schemas.SyncChannel.sync_id == channel.sync_id,
                            schemas.SyncChannel.workspace_id == member.workspace_id,
                            schemas.SyncChannel.deleted_at.is_(None),
                        ],
                    )
                    partner_channel_ids.extend(sibling.channel_id for sibling in siblings)

                if partner_channel_ids:
                    helpers.notify_synced_channels(
                        partner_client,
                        partner_channel_ids,
                        f":double_vertical_bar: Syncing with *{ws_name}* has been paused because they uninstalled the app.",
                    )
            except Exception as e:
                _logger.warning(f"handle_tokens_revoked: failed to notify partner {member.workspace_id}: {e}")

    _logger.info(
        "workspace_soft_deleted",
        extra={
            "workspace_id": workspace_record.id,
            "team_id": team_id,
            "memberships_paused": len(active_memberships),
            "channels_paused": len(my_channels),
        },
    )
def handle_team_join(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Handle a team_join event: a new user joined a connected workspace.

    1. Upsert the new user into ``user_directory`` for this workspace.
    2. Re-check all ``match_method='none'`` mappings targeting this workspace.
    """
    event = body.get("event", {})
    user_data = event.get("user", {})
    team_id = helpers.safe_get(body, "team_id")

    if not user_data or not team_id:
        return

    # Ignore bots and the built-in Slackbot pseudo-user.
    if user_data.get("is_bot") or user_data.get("id") == "USLACKBOT":
        return

    # FIX: look the workspace up by its Slack team-id column, as
    # handle_tokens_revoked does; ``id=team_id`` compared the "T..." string
    # against the integer primary key. TODO(review): confirm against schema.
    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id)
    if not workspace_record:
        _logger.warning(f"team_join: unknown team_id {team_id}")
        return

    _logger.info(
        "team_join_received",
        extra={"team_id": team_id, "user_id": user_data.get("id")},
    )

    helpers._upsert_single_user_to_directory(user_data, workspace_record.id)

    newly_matched, still_unmatched = helpers.run_auto_match_for_workspace(client, workspace_record.id)
    _logger.info(
        "team_join_matching_complete",
        extra={
            "workspace_id": workspace_record.id,
            "newly_matched": newly_matched,
            "still_unmatched": still_unmatched,
        },
    )


def handle_user_profile_changed(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Handle a user_profile_changed event: update directory and notify group members."""
    event = body.get("event", {})
    user_data = event.get("user", {})
    team_id = helpers.safe_get(body, "team_id")

    if not user_data or not team_id:
        return

    if user_data.get("is_bot") or user_data.get("id") == "USLACKBOT":
        return

    # FIX: same team-id-column lookup as handle_team_join above.
    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id)
    if not workspace_record:
        return

    helpers._upsert_single_user_to_directory(user_data, workspace_record.id)

    # Refresh the Home tab of every partner workspace that shares a group,
    # at most once per workspace.
    my_groups = _get_groups_for_workspace(workspace_record.id)
    notified_ws: set[int] = set()
    for group, _ in my_groups:
        members = _get_group_members(group.id)
        for m in members:
            if m.workspace_id and m.workspace_id != workspace_record.id and m.workspace_id not in notified_ws:
                partner = helpers.get_workspace_by_id(m.workspace_id, context=context)
                if partner:
                    builders.refresh_home_tab_for_workspace(partner, logger, context=context)
                notified_ws.add(m.workspace_id)

    _logger.info(
        "user_profile_updated",
        extra={"team_id": team_id, "user_id": user_data.get("id")},
    )


def handle_user_mapping_back(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Return from the user mapping screen to the main Home tab."""
    user_id = helpers.get_user_id_from_body(body)
    if not user_id:
        return
    builders.build_home_tab(body, client, logger, context, user_id=user_id)
+ """ + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "user_mapping_refresh"}) + return + + team_id = ( + helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "user", "team_id") + ) + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if not workspace_record: + return + + raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" + try: + group_id = int(raw_group) + except (TypeError, ValueError): + group_id = 0 + + gid_opt = group_id or None + current_hash = builders._user_mapping_content_hash(workspace_record, gid_opt) + hash_key = f"user_mapping_hash:{workspace_record.team_id}:{user_id}:{group_id}" + blocks_key = f"user_mapping_blocks:{workspace_record.team_id}:{user_id}:{group_id}" + refresh_at_key = f"refresh_at:user_mapping:{workspace_record.team_id}:{user_id}:{group_id}" + + action, cached_blocks, remaining = helpers.refresh_cooldown_check( + current_hash, hash_key, blocks_key, refresh_at_key + ) + cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) + + if action == "cooldown" and cached_blocks is not None and remaining is not None: + blocks_with_message = helpers.inject_cooldown_message( + cached_blocks, builders._USER_MAPPING_REFRESH_BUTTON_INDEX, remaining + ) + client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message}) + return + if action == "cached" and cached_blocks is not None: + client.views_publish(user_id=user_id, view={"type": "home", "blocks": cached_blocks}) + helpers._cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) + return + + helpers._CACHE.pop(f"dir_refresh:{workspace_record.id}", None) + + if group_id: + members = _get_group_members(group_id) + else: + members = [] + for group, _ in 
_get_groups_for_workspace(workspace_record.id): + members.extend(_get_group_members(group.id)) + + partner_clients: list[tuple[WebClient, int]] = [] + + for m in members: + if not m.workspace_id or m.workspace_id == workspace_record.id: + continue + try: + helpers._CACHE.pop(f"dir_refresh:{m.workspace_id}", None) + partner_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + if partner_ws and partner_ws.bot_token: + partner_client = WebClient(token=helpers.decrypt_bot_token(partner_ws.bot_token)) + helpers._refresh_user_directory(partner_client, m.workspace_id) + partner_clients.append((partner_client, m.workspace_id)) + helpers.seed_user_mappings(m.workspace_id, workspace_record.id, group_id=gid_opt) + helpers.seed_user_mappings(workspace_record.id, m.workspace_id, group_id=gid_opt) + except Exception: + pass + + helpers.run_auto_match_for_workspace(client, workspace_record.id) + for p_client, p_id in partner_clients: + try: + helpers.run_auto_match_for_workspace(p_client, p_id) + except Exception: + pass + + block_dicts = builders.build_user_mapping_screen( + client, + workspace_record, + user_id, + group_id=gid_opt, + context=context, + return_blocks=True, + ) + if block_dicts is None: + return + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + helpers.refresh_after_full(hash_key, blocks_key, refresh_at_key, current_hash, block_dicts) + + +def handle_user_mapping_edit_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Save the per-user mapping edit and refresh the mapping screen.""" + from handlers._common import _parse_private_metadata + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "user_mapping_edit_submit"}) + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = 
def handle_user_mapping_edit_submit(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Save the per-user mapping edit and refresh the mapping screen."""
    from handlers._common import _parse_private_metadata

    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "user_mapping_edit_submit"})
        return

    team_id = helpers.safe_get(body, "view", "team_id")
    workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None
    if not workspace_record:
        return

    meta = _parse_private_metadata(body)
    mapping_id = meta.get("mapping_id")
    group_id = meta.get("group_id") or 0

    if not mapping_id:
        _logger.warning("user_mapping_edit_submit: missing mapping_id")
        return

    mapping = DbManager.get_record(schemas.UserMapping, id=mapping_id)
    if not mapping:
        return

    # Scan the form state for the static-select choice; last one found wins.
    state_values = helpers.safe_get(body, "view", "state", "values") or {}
    chosen = None
    for block_state in state_values.values():
        for action_state in block_state.values():
            option = action_state.get("selected_option")
            if option:
                chosen = option.get("value")

    now = datetime.now(UTC)
    if chosen == "__remove__":
        DbManager.update_records(
            schemas.UserMapping,
            [schemas.UserMapping.id == mapping.id],
            {
                schemas.UserMapping.target_user_id: None,
                schemas.UserMapping.match_method: "none",
                schemas.UserMapping.matched_at: now,
            },
        )
        _logger.info("user_mapping_removed", extra={"mapping_id": mapping.id})
    elif chosen:
        DbManager.update_records(
            schemas.UserMapping,
            [schemas.UserMapping.id == mapping.id],
            {
                schemas.UserMapping.target_user_id: chosen,
                schemas.UserMapping.match_method: "manual",
                schemas.UserMapping.matched_at: now,
            },
        )
        _logger.info("user_mapping_updated", extra={"mapping_id": mapping.id, "target_user_id": chosen})

    # Invalidate user-mapping caches so next Refresh on that screen does a full rebuild
    helpers._cache_delete_prefix(f"user_mapping_hash:{workspace_record.team_id}:")
    helpers._cache_delete_prefix(f"user_mapping_blocks:{workspace_record.team_id}:")
    helpers._cache_delete_prefix(f"refresh_at:user_mapping:{workspace_record.team_id}:")

    builders.build_user_mapping_screen(
        client,
        workspace_record,
        user_id,
        group_id=group_id or None,
        context=context,
    )
    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py new file mode 100644 index 0000000..a77ab69 --- /dev/null +++ b/syncbot/helpers/__init__.py @@ -0,0 +1,148 @@ +"""Shared utility functions used throughout SyncBot. + +This package re-exports public names from sub-modules so existing +``from helpers import X`` and ``helpers.X`` references continue to work. +""" + +from helpers._cache import ( + _CACHE, + _CACHE_TTL_SECONDS, + _USER_INFO_CACHE_TTL, + _cache_delete, + _cache_delete_prefix, + _cache_get, + _cache_set, +) +from helpers.core import ( + format_admin_label, + get_request_type, + get_user_id_from_body, + is_user_authorized, + safe_get, +) +from helpers.encryption import decrypt_bot_token, encrypt_bot_token +from helpers.files import ( + cleanup_temp_files, + download_public_file, + download_slack_files, + upload_files_to_slack, + upload_photos, +) +from helpers.notifications import ( + get_admin_ids, + notify_admins_dm, + notify_admins_dm_blocks, + notify_synced_channels, + purge_stale_soft_deletes, + save_dm_messages_to_group_member, +) +from helpers.oauth import get_oauth_flow +from helpers.refresh import ( + cooldown_message_block, + inject_cooldown_message, + refresh_after_full, + refresh_cooldown_check, +) +from helpers.slack_api import ( + _users_info, + delete_message, + get_bot_info_from_event, + get_own_bot_id, + get_own_bot_user_id, + get_post_records, + get_user_info, + post_message, + slack_retry, + update_modal, +) +from helpers.user_matching import ( + _get_user_profile, + _normalize_name, + _refresh_user_directory, + _upsert_single_user_to_directory, + apply_mentioned_users, + get_display_name_and_icon_for_synced_message, + get_mapped_target_user_id, + normalize_display_name, + parse_mentioned_users, + resolve_channel_references, + resolve_mention_for_workspace, + run_auto_match_for_workspace, + seed_user_mappings, +) +from helpers.workspace import ( + get_federated_workspace, + get_federated_workspace_for_sync, + get_group_members, 
+ get_groups_for_workspace, + get_sync_list, + get_workspace_by_id, + get_workspace_record, + resolve_channel_name, + resolve_workspace_name, +) + +__all__ = [ + "_CACHE", + "_CACHE_TTL_SECONDS", + "_USER_INFO_CACHE_TTL", + "_cache_delete", + "_cache_delete_prefix", + "_cache_get", + "_cache_set", + "_get_user_profile", + "_normalize_name", + "_refresh_user_directory", + "_upsert_single_user_to_directory", + "_users_info", + "apply_mentioned_users", + "cleanup_temp_files", + "cooldown_message_block", + "decrypt_bot_token", + "delete_message", + "download_public_file", + "download_slack_files", + "encrypt_bot_token", + "format_admin_label", + "get_admin_ids", + "get_bot_info_from_event", + "get_federated_workspace", + "get_federated_workspace_for_sync", + "get_group_members", + "get_groups_for_workspace", + "get_display_name_and_icon_for_synced_message", + "get_mapped_target_user_id", + "get_oauth_flow", + "normalize_display_name", + "get_own_bot_id", + "get_own_bot_user_id", + "get_post_records", + "get_request_type", + "get_sync_list", + "get_user_id_from_body", + "get_user_info", + "get_workspace_by_id", + "get_workspace_record", + "inject_cooldown_message", + "is_user_authorized", + "notify_admins_dm", + "notify_admins_dm_blocks", + "notify_synced_channels", + "parse_mentioned_users", + "post_message", + "purge_stale_soft_deletes", + "resolve_channel_name", + "resolve_channel_references", + "refresh_after_full", + "refresh_cooldown_check", + "resolve_mention_for_workspace", + "resolve_workspace_name", + "run_auto_match_for_workspace", + "safe_get", + "save_dm_messages_to_group_member", + "seed_user_mappings", + "slack_retry", + "update_modal", + "upload_files_to_slack", + "upload_photos", +] diff --git a/syncbot/helpers/_cache.py b/syncbot/helpers/_cache.py new file mode 100644 index 0000000..f20d665 --- /dev/null +++ b/syncbot/helpers/_cache.py @@ -0,0 +1,38 @@ +"""Lightweight in-process TTL cache. 
+ +Lambda containers are reused across invocations, so a short TTL cache +avoids redundant DB queries for the same sync list within a warm container. +""" + +import time as _time + +_CACHE: dict = {} +_CACHE_TTL_SECONDS = 60 +_USER_INFO_CACHE_TTL = 300 # 5 min for user info lookups + + +def _cache_get(key: str): + """Return a cached value if it exists and has not expired, else *None*.""" + entry = _CACHE.get(key) + if entry and (_time.monotonic() - entry["t"]) < entry.get("ttl", _CACHE_TTL_SECONDS): + return entry["v"] + _CACHE.pop(key, None) + return None + + +def _cache_set(key: str, value, ttl: int = _CACHE_TTL_SECONDS): + """Store *value* in the cache under *key* with an optional TTL (seconds).""" + _CACHE[key] = {"v": value, "t": _time.monotonic(), "ttl": ttl} + + +def _cache_delete(key: str) -> None: + """Remove a single cache entry.""" + _CACHE.pop(key, None) + + +def _cache_delete_prefix(prefix: str) -> int: + """Remove all cache entries whose key starts with *prefix*. Returns count removed.""" + to_remove = [k for k in _CACHE if k.startswith(prefix)] + for k in to_remove: + _CACHE.pop(k, None) + return len(to_remove) diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py new file mode 100644 index 0000000..f99e548 --- /dev/null +++ b/syncbot/helpers/core.py @@ -0,0 +1,104 @@ +"""Core utility functions used throughout SyncBot.""" + +import logging +import os +from typing import Any + +from slack_sdk.errors import SlackApiError + +import constants +from slack import actions + +_logger = logging.getLogger(__name__) + + +def safe_get(data: Any, *keys: Any) -> Any: + """Safely traverse nested dicts/lists. 
Returns None on missing keys.""" + if not data: + return None + try: + result = data + for k in keys: + if isinstance(k, int) and isinstance(result, list) or result.get(k): + result = result[k] + else: + return None + return result + except (KeyError, AttributeError, IndexError): + return None + + +def get_user_id_from_body(body: dict) -> str | None: + """Extract the acting user's ID from any Slack request payload.""" + return safe_get(body, "user_id") or safe_get(body, "user", "id") + + +def is_user_authorized(client, user_id: str) -> bool: + """Return *True* if the user is allowed to configure SyncBot. + + When ``REQUIRE_ADMIN`` is ``"true"`` (the default), only workspace + admins and owners are authorized. + """ + from .slack_api import _users_info + + require_admin = os.environ.get(constants.REQUIRE_ADMIN, "true").lower() + if require_admin != "true": + return True + + try: + res = _users_info(client, user_id) + except SlackApiError: + _logger.warning(f"Could not verify admin status for user {user_id} — denying access") + return False + + user = safe_get(res, "user") or {} + return bool(user.get("is_admin") or user.get("is_owner")) + + +def format_admin_label(client, user_id: str, workspace) -> tuple[str, str]: + """Return ``(display_name, full_label)`` for an admin.""" + from .slack_api import get_user_info + from .workspace import resolve_workspace_name + + display_name, _ = get_user_info(client, user_id) + display_name = display_name or "An admin" + ws_name = resolve_workspace_name(workspace) if workspace else None + if ws_name: + return display_name, f"{display_name} ({ws_name})" + return display_name, display_name + + +_PREFIXED_ACTIONS = ( + actions.CONFIG_REMOVE_FEDERATION_CONNECTION, + actions.CONFIG_LEAVE_GROUP, + actions.CONFIG_ACCEPT_GROUP_REQUEST, + actions.CONFIG_DECLINE_GROUP_REQUEST, + actions.CONFIG_CANCEL_GROUP_REQUEST, + actions.CONFIG_SUBSCRIBE_CHANNEL, + actions.CONFIG_UNPUBLISH_CHANNEL, + actions.CONFIG_USER_MAPPING_EDIT, + 
# Action ids that may carry a record suffix (``<prefix>_<id>``); the router
# collapses them back to the bare prefix for table-driven dispatch.
_PREFIXED_ACTIONS = (
    actions.CONFIG_REMOVE_FEDERATION_CONNECTION,
    actions.CONFIG_LEAVE_GROUP,
    actions.CONFIG_ACCEPT_GROUP_REQUEST,
    actions.CONFIG_DECLINE_GROUP_REQUEST,
    actions.CONFIG_CANCEL_GROUP_REQUEST,
    actions.CONFIG_SUBSCRIBE_CHANNEL,
    actions.CONFIG_UNPUBLISH_CHANNEL,
    actions.CONFIG_USER_MAPPING_EDIT,
    actions.CONFIG_REMOVE_SYNC,
    actions.CONFIG_RESUME_SYNC,
    actions.CONFIG_PAUSE_SYNC,
    actions.CONFIG_STOP_SYNC,
)


def get_request_type(body: dict) -> tuple[str, str]:
    """Classify an incoming Slack request into a ``(category, identifier)`` pair."""
    request_type = safe_get(body, "type")
    if request_type == "event_callback":
        return ("event_callback", safe_get(body, "event", "type"))
    elif request_type == "block_actions":
        block_action = safe_get(body, "actions", 0, "action_id")
        # FIX: block_action can be None for payloads without an action_id;
        # calling .startswith on None raised AttributeError in the router.
        if block_action:
            for prefix in _PREFIXED_ACTIONS:
                if block_action == prefix or block_action.startswith(prefix + "_"):
                    block_action = prefix
                    break
        return ("block_actions", block_action)
    elif request_type == "view_submission":
        return ("view_submission", safe_get(body, "view", "callback_id"))
    elif not request_type and "command" in body:
        return ("command", safe_get(body, "command"))
    else:
        return ("unknown", "unknown")
+""" + +import base64 +import functools +import logging +import os + +from cryptography.fernet import Fernet, InvalidToken + +import constants + +_logger = logging.getLogger(__name__) + +_PBKDF2_ITERATIONS = 600_000 +_PBKDF2_SALT_PREFIX = b"syncbot-fernet-v1" + + +@functools.lru_cache(maxsize=2) +def _get_fernet(key: str) -> Fernet: + """Derive a Fernet cipher from an arbitrary passphrase via PBKDF2.""" + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC + + salt = _PBKDF2_SALT_PREFIX + key.encode()[:16] + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=_PBKDF2_ITERATIONS, + ) + derived = kdf.derive(key.encode()) + return Fernet(base64.urlsafe_b64encode(derived)) + + +def _encryption_enabled() -> bool: + """Return *True* if bot-token encryption is active.""" + key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + return bool(key) and key != "123" + + +def encrypt_bot_token(token: str) -> str: + """Encrypt a bot token before storing it in the database.""" + if not _encryption_enabled(): + return token + key = os.environ[constants.PASSWORD_ENCRYPT_KEY] + return _get_fernet(key).encrypt(token.encode()).decode() + + +def decrypt_bot_token(encrypted: str) -> str: + """Decrypt a bot token read from the database. + + Raises on failure when encryption is enabled. + """ + if not _encryption_enabled(): + return encrypted + key = os.environ[constants.PASSWORD_ENCRYPT_KEY] + try: + return _get_fernet(key).decrypt(encrypted.encode()).decode() + except InvalidToken: + _logger.error( + "Bot token decryption failed — refusing to use the token. " + "If you recently enabled encryption, run " + "db/migrate_002_encrypt_tokens.py to encrypt existing tokens." + ) + raise ValueError( + "Bot token decryption failed. The token may be plaintext (not yet migrated) or tampered with." 
+ ) from None diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py new file mode 100644 index 0000000..1be5818 --- /dev/null +++ b/syncbot/helpers/export_import.py @@ -0,0 +1,524 @@ +"""Backup/restore and data migration export/import helpers. + +Full-instance backup: dump all tables as JSON with HMAC for tampering detection. +Data migration: workspace-scoped export with Ed25519 signature; import with replace mode. +""" + +import hashlib +import hmac +import json +import logging +import os +from datetime import datetime +from decimal import Decimal +from typing import Any + +import constants +from db import DbManager, schemas + +_logger = logging.getLogger(__name__) + +BACKUP_VERSION = 1 +MIGRATION_VERSION = 1 + + +def _json_serializer(obj: Any) -> Any: + """Convert datetime and Decimal for JSON.""" + if isinstance(obj, datetime): + return obj.isoformat() + if isinstance(obj, Decimal): + return float(obj) + raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable") + + +def canonical_json_dumps(obj: dict) -> bytes: + """Serialize to canonical JSON (sort_keys, no extra whitespace) for signing/HMAC.""" + return json.dumps( + obj, + sort_keys=True, + separators=(",", ":"), + default=_json_serializer, + ).encode("utf-8") + + +def _compute_encryption_key_hash() -> str | None: + """SHA-256 hex of PASSWORD_ENCRYPT_KEY, or None if unset.""" + key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + if not key or key == "123": + return None + return hashlib.sha256(key.encode()).hexdigest() + + +def _compute_backup_hmac(payload_without_hmac: dict) -> str: + """HMAC-SHA256 of canonical JSON of payload (excluding hmac field), keyed by PASSWORD_ENCRYPT_KEY.""" + key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + if not key: + key = "" + raw = canonical_json_dumps(payload_without_hmac) + return hmac.new(key.encode(), raw, hashlib.sha256).hexdigest() + + +def _records_to_list(records: list, cls: type) -> list[dict]: + 
"""Convert ORM records to list of dicts with serializable values.""" + out = [] + for r in records: + d = {} + for k in cls._get_column_keys(): + v = getattr(r, k) + if isinstance(v, datetime): + v = v.isoformat() + elif isinstance(v, Decimal): + v = float(v) + d[k] = v + out.append(d) + return out + + +# --------------------------------------------------------------------------- +# Full-instance backup +# --------------------------------------------------------------------------- + +def build_full_backup() -> dict: + """Build full-instance backup payload (all tables, version, exported_at, encryption_key_hash, hmac).""" + payload = { + "version": BACKUP_VERSION, + "exported_at": datetime.utcnow().isoformat() + "Z", + "encryption_key_hash": _compute_encryption_key_hash(), + } + tables = [ + ("workspaces", schemas.Workspace), + ("workspace_groups", schemas.WorkspaceGroup), + ("workspace_group_members", schemas.WorkspaceGroupMember), + ("syncs", schemas.Sync), + ("sync_channels", schemas.SyncChannel), + ("post_meta", schemas.PostMeta), + ("user_directory", schemas.UserDirectory), + ("user_mappings", schemas.UserMapping), + ("federated_workspaces", schemas.FederatedWorkspace), + ("instance_keys", schemas.InstanceKey), + ] + for table_name, cls in tables: + records = DbManager.find_records(cls, []) + payload[table_name] = _records_to_list(records, cls) + + payload["hmac"] = _compute_backup_hmac({k: v for k, v in payload.items() if k != "hmac"}) + return payload + + +def verify_backup_hmac(data: dict) -> bool: + """Return True if HMAC in data matches recomputed HMAC (excluding hmac field).""" + stored = data.get("hmac") + if not stored: + return False + payload_without_hmac = {k: v for k, v in data.items() if k != "hmac"} + expected = _compute_backup_hmac(payload_without_hmac) + return hmac.compare_digest(stored, expected) # noqa: S324 + + +def verify_backup_encryption_key(data: dict) -> bool: + """Return True if current encryption key hash matches backup's.""" + current 
= _compute_encryption_key_hash() + backup_hash = data.get("encryption_key_hash") + if backup_hash is None and current is None: + return True + if backup_hash is None or current is None: + return False + return hmac.compare_digest(current, backup_hash) # noqa: S324 + + +def restore_full_backup( + data: dict, + *, + skip_hmac_check: bool = False, + skip_encryption_key_check: bool = False, +) -> list[str]: + """Restore full backup into DB. Inserts in FK order. Returns list of team_ids for cache invalidation. + + Caller must have validated version and structure. Does not truncate tables; assumes empty or + intentional overwrite (e.g. restore after rebuild). + """ + team_ids: list[str] = [] + tables = [ + "workspaces", + "workspace_groups", + "workspace_group_members", + "syncs", + "sync_channels", + "post_meta", + "user_directory", + "user_mappings", + "federated_workspaces", + "instance_keys", + ] + table_to_schema = { + "workspaces": schemas.Workspace, + "workspace_groups": schemas.WorkspaceGroup, + "workspace_group_members": schemas.WorkspaceGroupMember, + "syncs": schemas.Sync, + "sync_channels": schemas.SyncChannel, + "post_meta": schemas.PostMeta, + "user_directory": schemas.UserDirectory, + "user_mappings": schemas.UserMapping, + "federated_workspaces": schemas.FederatedWorkspace, + "instance_keys": schemas.InstanceKey, + } + datetime_keys = {"created_at", "updated_at", "deleted_at", "joined_at", "matched_at"} + for table_name in tables: + rows = data.get(table_name, []) + cls = table_to_schema[table_name] + for row in rows: + kwargs = {} + for k, v in row.items(): + if v is None: + kwargs[k] = None + elif isinstance(v, str) and k in datetime_keys: + try: + kwargs[k] = datetime.fromisoformat(v.replace("Z", "+00:00")) + except ValueError: + kwargs[k] = v + elif k == "ts" and v is not None: + kwargs[k] = Decimal(str(v)) + else: + kwargs[k] = v + rec = cls(**kwargs) + DbManager.create_record(rec) + if table_name == "workspaces" and rec.team_id: + 
team_ids.append(rec.team_id) + return team_ids + + +# --------------------------------------------------------------------------- +# Cache invalidation after restore/import +# --------------------------------------------------------------------------- + +def invalidate_home_tab_caches_for_team(team_id: str) -> None: + """Clear home_tab_hash and home_tab_blocks for a team so next Refresh does full rebuild.""" + from helpers._cache import _cache_delete_prefix + _cache_delete_prefix(f"home_tab_hash:{team_id}") + _cache_delete_prefix(f"home_tab_blocks:{team_id}") + + +def invalidate_home_tab_caches_for_all_teams(team_ids: list[str]) -> None: + """Clear home tab caches for each team_id (e.g. after full restore).""" + for tid in team_ids: + invalidate_home_tab_caches_for_team(tid) + + +def invalidate_sync_list_cache_for_channel(channel_id: str) -> None: + """Clear get_sync_list cache for a channel.""" + from helpers._cache import _cache_delete + _cache_delete(f"sync_list:{channel_id}") + + +# --------------------------------------------------------------------------- +# Data migration export (workspace-scoped) +# --------------------------------------------------------------------------- + +def build_migration_export(workspace_id: int, include_source_instance: bool = True) -> dict: + """Build workspace-scoped migration JSON. 
Optionally sign with Ed25519 and include source_instance.""" + workspace = DbManager.get_record(schemas.Workspace, workspace_id) + if not workspace or workspace.deleted_at: + raise ValueError("Workspace not found") + + team_id = workspace.team_id + workspace_name = workspace.workspace_name or "" + + # Groups W is in + memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + groups_data = [] + for m in memberships: + g = DbManager.get_record(schemas.WorkspaceGroup, m.group_id) + if g: + groups_data.append({"name": g.name, "role": m.role}) + + # Syncs that have at least one SyncChannel for W + sync_channels_w = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.workspace_id == workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + sync_ids = {sc.sync_id for sc in sync_channels_w} + syncs_data = [] + sync_channels_data = [] + post_meta_by_key = {} + + for sync_id in sync_ids: + sync = DbManager.get_record(schemas.Sync, sync_id) + if not sync: + continue + pub_team = None + tgt_team = None + if sync.publisher_workspace_id: + pw = DbManager.get_record(schemas.Workspace, sync.publisher_workspace_id) + if pw: + pub_team = pw.team_id + if sync.target_workspace_id: + tw = DbManager.get_record(schemas.Workspace, sync.target_workspace_id) + if tw: + tgt_team = tw.team_id + syncs_data.append({ + "title": sync.title, + "sync_mode": sync.sync_mode or "group", + "publisher_team_id": pub_team, + "target_team_id": tgt_team, + "is_publisher": sync.publisher_workspace_id == workspace_id, + }) + for sc in sync_channels_w: + if sc.sync_id != sync_id: + continue + sync_channels_data.append({ + "sync_title": sync.title, + "channel_id": sc.channel_id, + "status": sc.status or "active", + }) + key = f"{sync.title}:{sc.channel_id}" + post_metas = 
DbManager.find_records( + schemas.PostMeta, + [schemas.PostMeta.sync_channel_id == sc.id], + ) + post_meta_by_key[key] = [{"post_id": pm.post_id, "ts": float(pm.ts)} for pm in post_metas] + + # user_directory for W + ud_records = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.deleted_at.is_(None), + ], + ) + user_directory_data = [] + for u in ud_records: + user_directory_data.append({ + "slack_user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + "normalized_name": u.normalized_name, + "updated_at": u.updated_at.isoformat() if u.updated_at else None, + }) + + # user_mappings involving W (export with team_id for other side) + um_records = DbManager.find_records( + schemas.UserMapping, + [ + (schemas.UserMapping.source_workspace_id == workspace_id) | (schemas.UserMapping.target_workspace_id == workspace_id), + ], + ) + user_mappings_data = [] + for um in um_records: + src_ws = DbManager.get_record(schemas.Workspace, um.source_workspace_id) if um.source_workspace_id else None + tgt_ws = DbManager.get_record(schemas.Workspace, um.target_workspace_id) if um.target_workspace_id else None + user_mappings_data.append({ + "source_team_id": src_ws.team_id if src_ws else None, + "target_team_id": tgt_ws.team_id if tgt_ws else None, + "source_user_id": um.source_user_id, + "target_user_id": um.target_user_id, + "match_method": um.match_method, + }) + + payload = { + "version": MIGRATION_VERSION, + "exported_at": datetime.utcnow().isoformat() + "Z", + "workspace": {"team_id": team_id, "workspace_name": workspace_name}, + "groups": groups_data, + "syncs": syncs_data, + "sync_channels": sync_channels_data, + "post_meta": post_meta_by_key, + "user_directory": user_directory_data, + "user_mappings": user_mappings_data, + } + + if include_source_instance: + from federation import core as federation + try: + url = federation.get_public_url() + 
instance_id = federation.get_instance_id() + _, public_key_pem = federation.get_or_create_instance_keypair() + code = federation.generate_federation_code(webhook_url=url, instance_id=instance_id, public_key=public_key_pem) + payload["source_instance"] = { + "webhook_url": url, + "instance_id": instance_id, + "public_key": public_key_pem, + "connection_code": code, + } + except Exception as e: + _logger.warning("build_migration_export: could not add source_instance: %s", e) + + # Sign with Ed25519 (exclude signature from signed bytes; include signed_at) + try: + from federation import core as federation + payload["signed_at"] = datetime.utcnow().isoformat() + "Z" + to_sign = {k: v for k, v in payload.items() if k != "signature"} + raw = canonical_json_dumps(to_sign).decode("utf-8") + payload["signature"] = federation.sign_body(raw) + except Exception as e: + _logger.warning("build_migration_export: could not sign: %s", e) + + return payload + + +def verify_migration_signature(data: dict) -> bool: + """Verify Ed25519 signature using source_instance.public_key. Returns False if no signature or invalid.""" + sig = data.get("signature") + source = data.get("source_instance") + if not sig or not source: + return False + public_key = source.get("public_key") + if not public_key: + return False + to_verify = {k: v for k, v in data.items() if k != "signature"} + raw = canonical_json_dumps(to_verify).decode("utf-8") + from federation import core as federation + return federation.verify_body(raw, sig, public_key) + + +def import_migration_data( + data: dict, + workspace_id: int, + group_id: int, + *, + team_id_to_workspace_id: dict[str, int], +) -> None: + """Import migration payload into DB (replace mode). Caller must have resolved federated group and team_id_to_workspace_id on B. + + - Replace mode: soft-delete W's SyncChannels in this group and their PostMeta, then create from export. 
+ - team_id_to_workspace_id: map export team_id -> B's workspace id (for publisher/target and user_mappings). + """ + from datetime import UTC + + syncs_export = data.get("syncs", []) + sync_channels_export = data.get("sync_channels", []) + post_meta_export = data.get("post_meta", {}) + user_directory_export = data.get("user_directory", []) + user_mappings_export = data.get("user_mappings", []) + workspace_export = data.get("workspace", {}) + export_team_id = workspace_export.get("team_id") + + # Replace mode: find syncs in group, then SyncChannels for this workspace in those syncs + syncs_in_group = DbManager.find_records(schemas.Sync, [schemas.Sync.group_id == group_id]) + sync_ids_in_group = [s.id for s in syncs_in_group] + if sync_ids_in_group: + channels_to_remove = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id.in_(sync_ids_in_group), + schemas.SyncChannel.workspace_id == workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + now = datetime.now(UTC) + for sc in channels_to_remove: + DbManager.delete_records( + schemas.PostMeta, + [schemas.PostMeta.sync_channel_id == sc.id], + ) + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == sc.id], + {schemas.SyncChannel.deleted_at: now}, + ) + + # Build sync title -> sync_id (B) for this group (create or reuse) + title_to_sync = {} + for s in syncs_export: + title = s.get("title") + if not title: + continue + existing = DbManager.find_records( + schemas.Sync, + [schemas.Sync.group_id == group_id, schemas.Sync.title == title], + ) + if existing: + title_to_sync[title] = existing[0].id + else: + pub_team = s.get("publisher_team_id") + tgt_team = s.get("target_team_id") + is_publisher = s.get("is_publisher") + pub_ws_id = (workspace_id if is_publisher else team_id_to_workspace_id.get(pub_team)) if pub_team else None + tgt_ws_id = (workspace_id if tgt_team == export_team_id else team_id_to_workspace_id.get(tgt_team)) if tgt_team else None + new_sync 
= schemas.Sync( + title=title, + group_id=group_id, + sync_mode=s.get("sync_mode", "group"), + publisher_workspace_id=pub_ws_id, + target_workspace_id=tgt_ws_id, + ) + DbManager.create_record(new_sync) + title_to_sync[title] = new_sync.id + + # Create SyncChannels and PostMeta + for sc_entry in sync_channels_export: + sync_title = sc_entry.get("sync_title") + channel_id = sc_entry.get("channel_id") + status = sc_entry.get("status", "active") + sync_id = title_to_sync.get(sync_title) + if not sync_id: + continue + new_sc = schemas.SyncChannel( + sync_id=sync_id, + workspace_id=workspace_id, + channel_id=channel_id, + status=status, + created_at=datetime.now(UTC), + ) + DbManager.create_record(new_sc) + key = f"{sync_title}:{channel_id}" + for pm in post_meta_export.get(key, []): + DbManager.create_record(schemas.PostMeta( + post_id=pm["post_id"], + sync_channel_id=new_sc.id, + ts=Decimal(str(pm["ts"])), + )) + + # user_directory for W (replace: remove existing for this workspace then insert) + DbManager.delete_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_id], + ) + for u in user_directory_export: + DbManager.create_record(schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=u["slack_user_id"], + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + normalized_name=u.get("normalized_name"), + updated_at=datetime.fromisoformat(u["updated_at"].replace("Z", "+00:00")) if u.get("updated_at") else datetime.now(UTC), + )) + + # user_mappings where both source and target workspace exist on B + for um in user_mappings_export: + src_team = um.get("source_team_id") + tgt_team = um.get("target_team_id") + src_ws_id = team_id_to_workspace_id.get(src_team) if src_team else None + tgt_ws_id = team_id_to_workspace_id.get(tgt_team) if tgt_team else None + if not src_ws_id or not tgt_ws_id: + continue + existing = DbManager.find_records( + schemas.UserMapping, + [ + 
schemas.UserMapping.source_workspace_id == src_ws_id, + schemas.UserMapping.source_user_id == um["source_user_id"], + schemas.UserMapping.target_workspace_id == tgt_ws_id, + ], + ) + if existing: + continue + DbManager.create_record(schemas.UserMapping( + source_workspace_id=src_ws_id, + source_user_id=um["source_user_id"], + target_workspace_id=tgt_ws_id, + target_user_id=um.get("target_user_id"), + match_method=um.get("match_method", "none"), + matched_at=datetime.now(UTC), + group_id=group_id, + )) diff --git a/syncbot/helpers/files.py b/syncbot/helpers/files.py new file mode 100644 index 0000000..543b520 --- /dev/null +++ b/syncbot/helpers/files.py @@ -0,0 +1,279 @@ +"""File upload/download helpers for message sync.""" + +import contextlib +import logging +import os +import re +import time as _time +import uuid +from logging import Logger + +import requests +from slack_sdk import WebClient + +import constants + +_logger = logging.getLogger(__name__) + +_DOWNLOAD_TIMEOUT = 30 # seconds +_MAX_FILE_BYTES = 100 * 1024 * 1024 # 100 MB +_STREAM_CHUNK = 8192 + + +def cleanup_temp_files(photos: list[dict] | None, direct_files: list[dict] | None) -> None: + """Remove temporary files created during message sync.""" + for item in photos or []: + path = item.get("path") + if path: + with contextlib.suppress(OSError): + os.remove(path) + for item in direct_files or []: + path = item.get("path") + if path: + with contextlib.suppress(OSError): + os.remove(path) + + +def _safe_file_parts(f: dict) -> tuple[str, str, str]: + """Return ``(safe_id, safe_ext, default_name)`` with path-safe characters only.""" + safe_id = re.sub(r"[^a-zA-Z0-9_-]", "", f.get("id", "file")) + safe_ext = re.sub(r"[^a-zA-Z0-9]", "", f.get("filetype", "bin")) + return safe_id, safe_ext, f"{safe_id}.{safe_ext}" + + +def _download_to_file(url: str, file_path: str, headers: dict | None = None) -> None: + """Stream a URL to disk, aborting if the response exceeds *_MAX_FILE_BYTES*. 
+ + Removes the partial file on any failure so /tmp doesn't fill up. + """ + try: + with requests.get(url, headers=headers, timeout=_DOWNLOAD_TIMEOUT, stream=True) as r: + r.raise_for_status() + written = 0 + with open(file_path, "wb") as fh: + for chunk in r.iter_content(chunk_size=_STREAM_CHUNK): + written += len(chunk) + if written > _MAX_FILE_BYTES: + raise ValueError(f"File exceeds {_MAX_FILE_BYTES} byte limit") + fh.write(chunk) + except Exception: + with contextlib.suppress(OSError): + os.remove(file_path) + raise + + +def _get_s3_client(): + """Return a reusable boto3 S3 client (created once per call-site).""" + import boto3 + + if constants.LOCAL_DEVELOPMENT: + return boto3.client( + "s3", + aws_access_key_id=os.environ[constants.AWS_ACCESS_KEY_ID], + aws_secret_access_key=os.environ[constants.AWS_SECRET_ACCESS_KEY], + ) + return boto3.client("s3") + + +def upload_photos(files: list[dict], client: WebClient, logger: Logger) -> list[dict]: + """Download file attachments from Slack and upload them to S3. + + Images are optionally converted from HEIC to PNG. 
+ """ + uploaded: list[dict] = [] + s3_client = _get_s3_client() + auth_headers = {"Authorization": f"Bearer {client.token}"} + + for f in files: + try: + is_image = f.get("mimetype", "").startswith("image") + + if is_image: + download_url = ( + f.get("thumb_480") or f.get("thumb_360") + or f.get("thumb_80") or f.get("url_private") + ) + else: + download_url = f.get("url_private") + if not download_url: + continue + + safe_id, safe_ext, file_name = _safe_file_parts(f) + file_path = f"/tmp/{file_name}" + file_mimetype = f.get("mimetype", "application/octet-stream") + + _download_to_file(download_url, file_path, headers=auth_headers) + + if is_image and f.get("filetype") == "heic": + from PIL import Image + from pillow_heif import register_heif_opener + + register_heif_opener() + heic_img = Image.open(file_path) + x, y = heic_img.size + coeff = min(constants.MAX_HEIF_SIZE / max(x, y), 1) + heic_img = heic_img.resize((int(x * coeff), int(y * coeff))) + heic_img.save(file_path.replace(".heic", ".png"), quality=95, optimize=True, format="PNG") + os.remove(file_path) + file_path = file_path.replace(".heic", ".png") + file_name = file_name.replace(".heic", ".png") + file_mimetype = "image/png" + + with open(file_path, "rb") as fh: + s3_client.upload_fileobj( + fh, constants.S3_IMAGE_BUCKET, file_name, ExtraArgs={"ContentType": file_mimetype} + ) + uploaded.append( + { + "url": f"{constants.S3_IMAGE_URL}{file_name}", + "name": file_name, + "path": file_path, + } + ) + except Exception as e: + logger.error(f"Error uploading file: {e}") + return uploaded + + +def download_public_file(url: str, logger: Logger) -> dict | None: + """Download a file from a public URL (e.g. 
GIPHY) to /tmp.""" + try: + content_type = "image/gif" + file_name = f"attachment_{uuid.uuid4().hex[:8]}.gif" + file_path = f"/tmp/{file_name}" + + with requests.get(url, timeout=_DOWNLOAD_TIMEOUT, stream=True) as r: + r.raise_for_status() + content_type = r.headers.get("content-type", "image/gif").split(";")[0] + ext = content_type.split("/")[-1] if "/" in content_type else "gif" + file_name = f"attachment_{uuid.uuid4().hex[:8]}.{ext}" + file_path = f"/tmp/{file_name}" + written = 0 + with open(file_path, "wb") as fh: + for chunk in r.iter_content(chunk_size=_STREAM_CHUNK): + written += len(chunk) + if written > _MAX_FILE_BYTES: + raise ValueError(f"File exceeds {_MAX_FILE_BYTES} byte limit") + fh.write(chunk) + + return {"path": file_path, "name": file_name, "mimetype": content_type} + except Exception as e: + logger.warning(f"download_public_file: failed for {url}: {e}") + return None + + +def download_slack_files( + files: list[dict], client: WebClient, logger: Logger +) -> list[dict]: + """Download files from Slack to /tmp for direct re-upload.""" + downloaded: list[dict] = [] + auth_headers = {"Authorization": f"Bearer {client.token}"} + + for f in files: + try: + url = f.get("url_private") + if not url: + continue + + safe_id, safe_ext, default_name = _safe_file_parts(f) + file_name = f.get("name") or default_name + file_path = f"/tmp/{safe_id}.{safe_ext}" + + _download_to_file(url, file_path, headers=auth_headers) + + downloaded.append({ + "path": file_path, + "name": file_name, + "mimetype": f.get("mimetype", "application/octet-stream"), + }) + except Exception as e: + logger.error(f"download_slack_files: failed for {f.get('id')}: {e}") + return downloaded + + +def upload_files_to_slack( + bot_token: str, + channel_id: str, + files: list[dict], + initial_comment: str | None = None, + thread_ts: str | None = None, +) -> tuple[dict | None, str | None]: + """Upload one or more local files directly to a Slack channel.""" + if not files: + return None, None + + 
slack_client = WebClient(bot_token) + file_uploads = [] + for f in files: + file_uploads.append({ + "file": f["path"], + "filename": f["name"], + }) + + kwargs: dict = {"channel": channel_id} + if initial_comment: + kwargs["initial_comment"] = initial_comment + if thread_ts: + kwargs["thread_ts"] = thread_ts + + try: + if len(file_uploads) == 1: + kwargs["file"] = file_uploads[0]["file"] + kwargs["filename"] = file_uploads[0]["filename"] + res = slack_client.files_upload_v2(**kwargs) + else: + kwargs["file_uploads"] = file_uploads + res = slack_client.files_upload_v2(**kwargs) + + msg_ts = _extract_file_message_ts(slack_client, res, channel_id, thread_ts=thread_ts) + return res, msg_ts + except Exception as e: + _logger.warning(f"upload_files_to_slack: failed for channel {channel_id}: {e}") + return None, None + + +def _extract_file_message_ts( + client: WebClient, upload_response, channel_id: str, + thread_ts: str | None = None, +) -> str | None: + """Extract the message ts created by a file upload.""" + if not upload_response: + return None + + file_id = None + with contextlib.suppress(KeyError, TypeError, IndexError): + file_id = upload_response["file"]["id"] + + if not file_id: + try: + files_list = upload_response["files"] + if files_list and len(files_list) > 0: + file_id = files_list[0]["id"] if isinstance(files_list[0], dict) else files_list[0].get("id") + except (KeyError, TypeError, IndexError): + pass + + if not file_id: + _logger.warning("_extract_file_message_ts: could not find file_id in upload response") + return None + + for attempt in range(4): + try: + info_resp = client.files_info(file=file_id) + shares = info_resp["file"]["shares"] + for share_type in ("public", "private"): + channel_shares = shares.get(share_type, {}).get(channel_id, []) + if channel_shares: + ts = channel_shares[0].get("ts") + _logger.info("_extract_file_message_ts: success", + extra={"file_id": file_id, "ts": ts, "attempt": attempt}) + return ts + except (KeyError, TypeError, 
IndexError): + pass + except Exception as e: + _logger.warning(f"_extract_file_message_ts: files.info error (attempt {attempt}): {e}") + + if attempt < 3: + _time.sleep(1.5) + + _logger.warning(f"_extract_file_message_ts: could not resolve ts for file {file_id} after retries") + return None diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py new file mode 100644 index 0000000..11243b2 --- /dev/null +++ b/syncbot/helpers/notifications.py @@ -0,0 +1,226 @@ +"""Admin DM notifications and channel notifications.""" + +import logging +from datetime import UTC, datetime + +from slack_sdk import WebClient +from sqlalchemy.exc import ProgrammingError + +import constants +from db import DbManager, schemas +from helpers._cache import _cache_get, _cache_set +from helpers.core import safe_get +from helpers.encryption import decrypt_bot_token + +_logger = logging.getLogger(__name__) + + +def get_admin_ids( + client: WebClient, + *, + team_id: str | None = None, + context: dict | None = None, +) -> list[str]: + """Return a list of admin/owner user IDs for the workspace behind *client*. + + If *context* and *team_id* are provided, uses request-scoped cache to avoid + repeated users.list for the same workspace within one request. 
+ """ + if context is not None and team_id: + cache = context.setdefault("_admin_ids", {}) + if team_id in cache: + return cache[team_id] + + from helpers.user_matching import _users_list_page + + cursor = "" + admin_ids: list[str] = [] + + while True: + try: + res = _users_list_page(client, cursor=cursor) + except Exception as e: + _logger.warning(f"get_admin_ids: failed to list users: {e}") + break + + members = safe_get(res, "members") or [] + for member in members: + if member.get("is_bot") or member.get("id") == "USLACKBOT": + continue + if member.get("deleted"): + continue + if member.get("is_admin") or member.get("is_owner"): + admin_ids.append(member["id"]) + + next_cursor = safe_get(res, "response_metadata", "next_cursor") + if not next_cursor: + break + cursor = next_cursor + + if context is not None and team_id: + context.setdefault("_admin_ids", {})[team_id] = admin_ids + return admin_ids + + +def notify_admins_dm( + client: WebClient, + message: str, + exclude_user_ids: set[str] | None = None, + blocks: list[dict] | None = None, +) -> int: + """Send a DM to all workspace admins/owners. Best-effort. + + Returns the number of admins successfully notified. + """ + notified = 0 + kwargs: dict = {"text": message} + if blocks: + kwargs["blocks"] = blocks + for user_id in get_admin_ids(client): + if exclude_user_ids and user_id in exclude_user_ids: + continue + try: + dm = client.conversations_open(users=[user_id]) + channel_id = safe_get(dm, "channel", "id") + if channel_id: + client.chat_postMessage(channel=channel_id, **kwargs) + notified += 1 + except Exception as e: + _logger.warning(f"notify_admins_dm: failed to DM user {user_id}: {e}") + + return notified + + +def notify_admins_dm_blocks( + client: WebClient, + text: str, + blocks: list[dict], +) -> list[dict]: + """Send a Block Kit DM to all workspace admins/owners. + + Returns a list of ``{"channel": ..., "ts": ...}`` dicts for each + successfully sent DM (used for later message updates). 
+ """ + sent: list[dict] = [] + for user_id in get_admin_ids(client): + try: + dm = client.conversations_open(users=[user_id]) + channel_id = safe_get(dm, "channel", "id") + if channel_id: + res = client.chat_postMessage(channel=channel_id, text=text, blocks=blocks) + msg_ts = safe_get(res, "ts") + if msg_ts: + sent.append({"channel": channel_id, "ts": msg_ts}) + except Exception as e: + _logger.warning(f"notify_admins_dm_blocks: failed to DM user {user_id}: {e}") + + return sent + + +def save_dm_messages_to_group_member(member_id: int, dm_entries: list[dict]) -> None: + """Persist DM channel/ts metadata on a group member record for later updates.""" + import json as _json + + if not dm_entries: + return + existing = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not existing: + return + try: + prev = _json.loads(existing.dm_messages) if existing.dm_messages else [] + except (ValueError, TypeError): + prev = [] + prev.extend(dm_entries) + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + {schemas.WorkspaceGroupMember.dm_messages: _json.dumps(prev)}, + ) + + +def notify_synced_channels(client: WebClient, channel_ids: list[str], message: str) -> int: + """Post a message to a list of channels. Best-effort.""" + notified = 0 + for channel_id in channel_ids: + try: + client.chat_postMessage(channel=channel_id, text=message) + notified += 1 + except Exception as e: + _logger.warning(f"notify_synced_channels: failed to post to {channel_id}: {e}") + return notified + + +def purge_stale_soft_deletes() -> int: + """Permanently delete workspaces that have been soft-deleted beyond the retention period. + + Returns 0 without raising if the schema is missing (e.g. fresh DB before init.sql). 
+ """ + from helpers.workspace import get_workspace_by_id + + cache_key = "purge_check" + if _cache_get(cache_key): + return 0 + _cache_set(cache_key, True, ttl=86400) + + retention_days = constants.SOFT_DELETE_RETENTION_DAYS + cutoff = datetime.now(UTC) - __import__("datetime").timedelta(days=retention_days) + + try: + stale_workspaces = DbManager.find_records( + schemas.Workspace, + [ + schemas.Workspace.deleted_at.isnot(None), + schemas.Workspace.deleted_at < cutoff, + ], + ) + except ProgrammingError as e: + _logger.debug("purge_stale_soft_deletes: schema not ready (%s), skipping", e.orig if hasattr(e, "orig") else e) + return 0 + + if not stale_workspaces: + return 0 + + purged = 0 + for ws in stale_workspaces: + ws_name = ws.workspace_name or ws.team_id or f"Workspace {ws.id}" + + group_memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.workspace_id == ws.id], + ) + + notified_ws: set[int] = set() + for membership in group_memberships: + other_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == membership.group_id, + schemas.WorkspaceGroupMember.workspace_id != ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in other_members: + if not m.workspace_id or m.workspace_id in notified_ws: + continue + partner = get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at is not None: + continue + notified_ws.add(m.workspace_id) + try: + partner_client = WebClient(token=decrypt_bot_token(partner.bot_token)) + notify_admins_dm( + partner_client, + f":wastebasket: *{ws_name}* has been permanently removed " + f"after {retention_days} days of inactivity.", + ) + except Exception as e: + _logger.warning(f"purge: failed to notify partner {m.workspace_id}: {e}") + + DbManager.delete_records(schemas.Workspace, [schemas.Workspace.id == ws.id]) + 
purged += 1 + + if purged: + _logger.info("purge_stale_soft_deletes_complete", extra={"purged": purged}) + + return purged diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py new file mode 100644 index 0000000..cbdbde4 --- /dev/null +++ b/syncbot/helpers/oauth.py @@ -0,0 +1,62 @@ +"""Slack OAuth flow construction.""" + +import logging +import os + +from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow +from slack_bolt.oauth import OAuthFlow +from slack_bolt.oauth.oauth_settings import OAuthSettings +from slack_sdk.oauth.installation_store import FileInstallationStore +from slack_sdk.oauth.state_store import FileOAuthStateStore + +import constants + +_logger = logging.getLogger(__name__) + + +def get_oauth_flow(): + """Build the Slack OAuth flow, choosing the right backend. + + - **Production (Lambda)**: Uses S3-backed stores. + - **Local development with OAuth credentials**: Uses file-based stores. + - **Local development without OAuth credentials**: Returns *None*. 
+ """ + client_id = os.environ.get(constants.SLACK_CLIENT_ID, "").strip() + client_secret = os.environ.get(constants.SLACK_CLIENT_SECRET, "").strip() + scopes_raw = os.environ.get(constants.SLACK_SCOPES, "").strip() + + if constants.LOCAL_DEVELOPMENT: + if not (client_id and client_secret and scopes_raw): + _logger.info("OAuth credentials not set — running in single-workspace mode") + return None + + _logger.info("OAuth credentials found — enabling local OAuth flow (file-based stores)") + base_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), ".oauth-data") + os.makedirs(base_dir, exist_ok=True) + + return OAuthFlow( + settings=OAuthSettings( + client_id=client_id, + client_secret=client_secret, + scopes=scopes_raw.split(","), + installation_store=FileInstallationStore( + base_dir=os.path.join(base_dir, "installations"), + client_id=client_id, + ), + state_store=FileOAuthStateStore( + expiration_seconds=600, + base_dir=os.path.join(base_dir, "states"), + client_id=client_id, + ), + ), + ) + else: + return LambdaS3OAuthFlow( + oauth_state_bucket_name=os.environ[constants.SLACK_STATE_S3_BUCKET_NAME], + installation_bucket_name=os.environ[constants.SLACK_INSTALLATION_S3_BUCKET_NAME], + settings=OAuthSettings( + client_id=client_id, + client_secret=client_secret, + scopes=scopes_raw.split(","), + ), + ) diff --git a/syncbot/helpers/refresh.py b/syncbot/helpers/refresh.py new file mode 100644 index 0000000..9af7c8b --- /dev/null +++ b/syncbot/helpers/refresh.py @@ -0,0 +1,83 @@ +"""Shared helpers for Refresh-button flows (Home tab and User Mapping). + +Provides a single place for cooldown message text, block injection, +and the hash/cache/cooldown check so both handlers stay DRY. 
+""" + +import time +from typing import Literal + +import constants +from helpers._cache import _cache_get, _cache_set + +_REFRESH_COOLDOWN_SECONDS = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) + + +def cooldown_message_block(remaining_seconds: int) -> dict: + """Return a Block Kit context block dict for the refresh cooldown message.""" + text = ( + f"No new data. Wait {remaining_seconds} second{'s' if remaining_seconds != 1 else ''} " + "before refreshing again." + ) + return { + "type": "context", + "elements": [{"type": "mrkdwn", "text": text}], + } + + +def inject_cooldown_message( + cached_blocks: list, + after_block_index: int, + remaining_seconds: int, +) -> list: + """Insert the cooldown message block after the given block index. Does not mutate cached_blocks.""" + msg_block = cooldown_message_block(remaining_seconds) + i = after_block_index + 1 + return cached_blocks[:i] + [msg_block] + cached_blocks[i:] + + +def refresh_cooldown_check( + current_hash: str, + hash_key: str, + blocks_key: str, + refresh_at_key: str, + cooldown_seconds: int | None = None, +) -> tuple[Literal["cooldown", "cached", "full"], list | None, int | None]: + """Check whether we can short-circuit based on hash and cooldown. + + Returns: + ("cooldown", cached_blocks, remaining_seconds) when hash matches and within cooldown. + ("cached", cached_blocks, None) when hash matches and past cooldown. + ("full", None, None) when hash differs or no cached blocks. 
+ """ + cooldown_sec = cooldown_seconds if cooldown_seconds is not None else _REFRESH_COOLDOWN_SECONDS + + cached_hash = _cache_get(hash_key) + cached_blocks = _cache_get(blocks_key) + last_refresh_at = _cache_get(refresh_at_key) + now = time.monotonic() + + if current_hash != cached_hash or cached_blocks is None: + return ("full", None, None) + + if last_refresh_at is not None and (now - last_refresh_at) < cooldown_sec: + remaining = max(0, int(cooldown_sec - (now - last_refresh_at))) + return ("cooldown", cached_blocks, remaining) + + return ("cached", cached_blocks, None) + + +def refresh_after_full( + hash_key: str, + blocks_key: str, + refresh_at_key: str, + current_hash: str, + block_dicts: list, + cooldown_seconds: int | None = None, +) -> None: + """Store hash, blocks, and refresh timestamp after a full refresh.""" + cooldown_sec = cooldown_seconds if cooldown_seconds is not None else _REFRESH_COOLDOWN_SECONDS + + _cache_set(hash_key, current_hash, ttl=3600) + _cache_set(blocks_key, block_dicts, ttl=3600) + _cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) diff --git a/syncbot/helpers/slack_api.py b/syncbot/helpers/slack_api.py new file mode 100644 index 0000000..68d11d4 --- /dev/null +++ b/syncbot/helpers/slack_api.py @@ -0,0 +1,220 @@ +"""Slack API wrappers with automatic retry and rate-limit handling.""" + +import json +import logging +import time as _time +from functools import wraps + +from slack_sdk import WebClient +from slack_sdk.errors import SlackApiError + +from db import DbManager, schemas +from helpers._cache import _USER_INFO_CACHE_TTL, _cache_get, _cache_set +from helpers.core import safe_get + +_logger = logging.getLogger(__name__) + +_SLACK_MAX_RETRIES = 3 +_SLACK_INITIAL_BACKOFF = 1.0 # seconds + + +def slack_retry(fn): + """Decorator that retries Slack API calls on rate-limit and server errors.""" + + @wraps(fn) + def wrapper(*args, **kwargs): + last_exc: Exception | None = None + backoff = _SLACK_INITIAL_BACKOFF + + for 
def slack_retry(fn):
    """Decorator that retries Slack API calls on rate limits (429) and 5xx errors.

    Retries up to ``_SLACK_MAX_RETRIES`` times with exponential backoff capped
    at 30 seconds; 429 responses honor the ``Retry-After`` header. Any other
    SlackApiError is re-raised immediately.
    """

    @wraps(fn)
    def wrapper(*args, **kwargs):
        last_exc: Exception | None = None
        backoff = _SLACK_INITIAL_BACKOFF

        for attempt in range(_SLACK_MAX_RETRIES + 1):
            try:
                return fn(*args, **kwargs)
            except SlackApiError as exc:
                last_exc = exc
                status = exc.response.status_code if exc.response else 0
                retryable = status == 429 or 500 <= status < 600
                if not retryable:
                    raise
                # FIX: previously we slept (up to 30s) even after the final
                # attempt, only to re-raise afterwards. Bail out instead.
                if attempt == _SLACK_MAX_RETRIES:
                    break
                if status == 429:
                    delay = float(exc.response.headers.get("Retry-After", backoff))
                    _logger.warning(f"{fn.__name__} rate-limited (attempt {attempt + 1}), sleeping {delay:.1f}s")
                else:
                    delay = backoff
                    _logger.warning(
                        f"{fn.__name__} server error {status} (attempt {attempt + 1}), retrying in {backoff:.1f}s"
                    )
                _time.sleep(delay)
                backoff = min(backoff * 2, 30)
        raise last_exc

    return wrapper


@slack_retry
def _users_info(client: WebClient, user_id: str) -> dict:
    """Low-level ``users.info`` wrapper so the retry decorator can catch SlackApiError."""
    return client.users_info(user=user_id)


def _get_auth_info(client: WebClient) -> dict | None:
    """Call ``auth.test`` once and cache both bot_id and user_id (1h TTL).

    Returns None (and logs) if the identity cannot be determined.
    """
    cache_key = "own_auth_info"
    cached = _cache_get(cache_key)
    if cached is not None:
        return cached
    try:
        res = client.auth_test()
        info = {"bot_id": safe_get(res, "bot_id"), "user_id": safe_get(res, "user_id")}
        _cache_set(cache_key, info, ttl=3600)
        return info
    except Exception:
        # Best-effort: identity is optional metadata; callers handle None.
        _logger.warning("Could not determine own identity via auth.test")
        return None


def get_own_bot_id(client: WebClient, context: dict) -> str | None:
    """Return SyncBot's own ``bot_id`` for the current workspace (prefers the Bolt context)."""
    bot_id = context.get("bot_id")
    if bot_id:
        return bot_id
    info = _get_auth_info(client)
    return info["bot_id"] if info else None


def get_own_bot_user_id(client: WebClient) -> str | None:
    """Return SyncBot's own *user* ID (``U…``) for the current workspace."""
    info = _get_auth_info(client)
    return info["user_id"] if info else None
a bot_message event.""" + event = body.get("event", {}) + bot_name = event.get("username") or "Bot" + icons = event.get("icons") or {} + icon_url = icons.get("image_48") or icons.get("image_36") or icons.get("image_72") + return bot_name, icon_url + + +def get_user_info(client: WebClient, user_id: str) -> tuple[str | None, str | None]: + """Return (display_name, profile_image_url) for a Slack user.""" + cache_key = f"user_info:{user_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + try: + res = _users_info(client, user_id) + except SlackApiError as exc: + _logger.debug(f"get_user_info: failed to look up user {user_id}: {exc}") + return None, None + + user_name = ( + safe_get(res, "user", "profile", "display_name") or safe_get(res, "user", "profile", "real_name") or None + ) + user_profile_url = safe_get(res, "user", "profile", "image_192") + + result = (user_name, user_profile_url) + _cache_set(cache_key, result, ttl=_USER_INFO_CACHE_TTL) + return result + + +@slack_retry +def post_message( + bot_token: str, + channel_id: str, + msg_text: str, + user_name: str | None = None, + user_profile_url: str | None = None, + thread_ts: str | None = None, + update_ts: str | None = None, + workspace_name: str | None = None, + blocks: list[dict] | None = None, +) -> dict: + """Post or update a message in a Slack channel.""" + slack_client = WebClient(bot_token) + posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)" + if blocks: + if msg_text.strip(): + msg_block = {"type": "section", "text": {"type": "mrkdwn", "text": msg_text}} + all_blocks = [msg_block] + blocks + else: + all_blocks = blocks + else: + all_blocks = [] + fallback_text = msg_text if msg_text.strip() else "Shared an image" + if update_ts: + res = slack_client.chat_update( + channel=channel_id, + text=fallback_text, + ts=update_ts, + blocks=all_blocks, + ) + else: + res = slack_client.chat_postMessage( + channel=channel_id, + text=fallback_text, + 
username=f"{user_name} {posted_from}", + icon_url=user_profile_url, + thread_ts=thread_ts, + blocks=all_blocks, + ) + return res + + +def get_post_records(thread_ts: str) -> list[tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Workspace]]: + """Look up all PostMeta records that share the same ``post_id``.""" + post = DbManager.find_records(schemas.PostMeta, [schemas.PostMeta.ts == float(thread_ts)]) + if post: + post_records = DbManager.find_join_records3( + left_cls=schemas.PostMeta, + right_cls1=schemas.SyncChannel, + right_cls2=schemas.Workspace, + filters=[ + schemas.PostMeta.post_id == post[0].post_id, + schemas.SyncChannel.status == "active", + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + else: + post_records = [] + return post_records + + +@slack_retry +def delete_message(bot_token: str, channel_id: str, ts: str) -> dict: + """Delete a message from a Slack channel.""" + slack_client = WebClient(bot_token) + res = slack_client.chat_delete( + channel=channel_id, + ts=ts, + ) + return res + + +def update_modal( + blocks: list[dict], + client: WebClient, + view_id: str, + title_text: str, + callback_id: str, + submit_button_text: str = "Submit", + parent_metadata: dict | None = None, + close_button_text: str = "Close", + notify_on_close: bool = False, +) -> None: + """Replace the contents of an existing Slack modal.""" + view = { + "type": "modal", + "callback_id": callback_id, + "title": {"type": "plain_text", "text": title_text}, + "submit": {"type": "plain_text", "text": submit_button_text}, + "close": {"type": "plain_text", "text": close_button_text}, + "notify_on_close": notify_on_close, + "blocks": blocks, + } + if parent_metadata: + view["private_metadata"] = json.dumps(parent_metadata) + + client.views_update(view_id=view_id, view=view) diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py new file mode 100644 index 0000000..a12b0f8 --- /dev/null +++ b/syncbot/helpers/user_matching.py @@ -0,0 +1,628 @@ 
+"""Cross-workspace user matching and mention resolution.""" + +import logging +import re +from datetime import UTC, datetime +from typing import Any + +from slack_sdk import WebClient +from slack_sdk.errors import SlackApiError + +import constants +from db import DbManager, schemas +from helpers._cache import _CACHE, _USER_INFO_CACHE_TTL, _cache_get, _cache_set +from helpers.core import safe_get +from helpers.encryption import decrypt_bot_token +from helpers.slack_api import _users_info, get_user_info, slack_retry +from helpers.workspace import ( + get_workspace_by_id, + resolve_workspace_name, +) + +_logger = logging.getLogger(__name__) + + +def _get_user_profile(client: WebClient, user_id: str) -> dict[str, Any] | None: + """Fetch a single user's profile with caching and retry.""" + cache_key = f"user_profile:{user_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + try: + res = _users_info(client, user_id) + except SlackApiError as exc: + _logger.warning(f"Failed to look up user {user_id}: {exc}") + return None + + profile = safe_get(res, "user", "profile") or {} + user_name = profile.get("display_name") or profile.get("real_name") or user_id + email = profile.get("email") + + result: dict[str, Any] = {"user_name": user_name, "email": email} + _cache_set(cache_key, result, ttl=_USER_INFO_CACHE_TTL) + return result + + +def _normalize_name(display_name: str) -> str: + """Trim trailing title/qualifier from a display name (e.g. 
drop text in parens or after dash).""" + name = re.split(r"\s+[\(\-]", display_name or "")[0] + return name.strip() + + +def normalize_display_name(name: str | None) -> str: + """Return display name with trailing paren/dash qualifiers stripped; fallback to original if empty.""" + if not name: + return name or "" + n = _normalize_name(name) + return n if n else name + + +def _match_ttl(method: str) -> int: + """Return the TTL in seconds for a given match method.""" + if method == "manual": + return 0 + if method == "email": + return constants.MATCH_TTL_EMAIL + if method == "name": + return constants.MATCH_TTL_NAME + return constants.MATCH_TTL_NONE + + +def _is_mapping_fresh(mapping: schemas.UserMapping) -> bool: + """Return True if a cached mapping is still within its TTL.""" + if mapping.match_method == "manual": + return True + ttl = _match_ttl(mapping.match_method) + age = (datetime.now(UTC) - mapping.matched_at.replace(tzinfo=UTC)).total_seconds() + return age < ttl + + +@slack_retry +def _users_list_page(client: WebClient, cursor: str = "") -> dict: + """Fetch one page of users.list (with retry on rate-limit).""" + return client.users_list(limit=200, cursor=cursor) + + +def _refresh_user_directory(client: WebClient, workspace_id: int) -> None: + """Crawl users.list for a workspace and upsert into user_directory. + + Active users are upserted normally. Deactivated users + (``member["deleted"] == True``) are soft-deleted via + ``_upsert_single_user_to_directory``. Users that were previously + in the directory but no longer appear in ``users.list`` at all are + hard-deleted along with their mappings. 
+ """ + cache_key = f"dir_refresh:{workspace_id}" + if _cache_get(cache_key): + return + + _logger.info("user_directory_refresh_start", extra={"workspace_id": workspace_id}) + cursor = "" + count = 0 + seen_user_ids: set[str] = set() + + while True: + res = _users_list_page(client, cursor=cursor) + members = safe_get(res, "members") or [] + + for member in members: + if member.get("is_bot") or member.get("id") == "USLACKBOT": + continue + seen_user_ids.add(member["id"]) + _upsert_single_user_to_directory(member, workspace_id) + count += 1 + + cursor = safe_get(res, "response_metadata", "next_cursor") or "" + if not cursor: + break + + if seen_user_ids: + all_entries = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_id], + ) + for entry in all_entries: + if entry.slack_user_id not in seen_user_ids: + _purge_mappings_for_user(entry.slack_user_id, workspace_id) + DbManager.delete_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == entry.id], + ) + + _logger.info("user_directory_refresh_done", extra={"workspace_id": workspace_id, "count": count}) + _cache_set(cache_key, True, ttl=constants.USER_DIR_REFRESH_TTL) + + +def _upsert_single_user_to_directory(member: dict, workspace_id: int) -> None: + """Insert or update a single user in the directory and propagate name changes. + + If the user is deactivated (``member["deleted"] == True``), their + directory entry is soft-deleted and all associated user mappings are + removed. 
+ """ + profile = member.get("profile", {}) + display_name = profile.get("display_name") or "" + real_name = profile.get("real_name") or "" + email = profile.get("email") + now = datetime.now(UTC) + current_name = display_name or real_name + is_deleted = member.get("deleted", False) + + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.slack_user_id == member["id"], + ], + ) + + if is_deleted: + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + {schemas.UserDirectory.deleted_at: now, schemas.UserDirectory.updated_at: now}, + ) + _purge_mappings_for_user(member["id"], workspace_id) + _CACHE.pop(f"user_info:{member['id']}", None) + return + + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: email, + schemas.UserDirectory.real_name: real_name, + schemas.UserDirectory.display_name: display_name, + schemas.UserDirectory.normalized_name: _normalize_name(display_name) + if display_name + else _normalize_name(real_name), + schemas.UserDirectory.updated_at: now, + schemas.UserDirectory.deleted_at: None, + }, + ) + else: + DbManager.create_record( + schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=member["id"], + email=email, + real_name=real_name, + display_name=display_name, + normalized_name=_normalize_name(display_name) if display_name else _normalize_name(real_name), + updated_at=now, + ) + ) + + if current_name: + mappings = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == workspace_id, + schemas.UserMapping.source_user_id == member["id"], + ], + ) + for m in mappings: + if m.source_display_name != current_name: + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == m.id], + {schemas.UserMapping.source_display_name: 
current_name}, + ) + + _CACHE.pop(f"user_info:{member['id']}", None) + + +def _purge_mappings_for_user(slack_user_id: str, workspace_id: int) -> None: + """Hard-delete all user mappings where this user is source or target.""" + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == workspace_id, + schemas.UserMapping.source_user_id == slack_user_id, + ], + ) + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.target_workspace_id == workspace_id, + schemas.UserMapping.target_user_id == slack_user_id, + ], + ) + + +@slack_retry +def _lookup_user_by_email(client: WebClient, email: str) -> str | None: + """Resolve a user ID from an email address in the target workspace.""" + res = client.users_lookupByEmail(email=email) + return safe_get(res, "user", "id") + + +def _find_user_match( + source_user_id: str, + source_profile: dict[str, Any], + target_client: WebClient, + target_workspace_id: int, +) -> tuple[str | None, str]: + """Run the matching algorithm for one source user against one target workspace.""" + email = source_profile.get("email") + + if email: + try: + target_uid = _lookup_user_by_email(target_client, email) + if target_uid: + return target_uid, "email" + except SlackApiError as exc: + _logger.debug(f"match_user: email lookup failed for {email}: {exc}") + + _refresh_user_directory(target_client, target_workspace_id) + + source_real = source_profile.get("real_name", "") + source_display = source_profile.get("display_name", "") + source_normalized = _normalize_name(source_display) if source_display else _normalize_name(source_real) + + if not source_normalized: + return None, "none" + + candidates = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == target_workspace_id, + schemas.UserDirectory.deleted_at.is_(None), + ], + ) + + name_matches = [ + c + for c in candidates + if c.normalized_name + and c.normalized_name.lower() == 
def _find_user_match(
    source_user_id: str,
    source_profile: dict[str, Any],
    target_client: WebClient,
    target_workspace_id: int,
) -> tuple[str | None, str]:
    """Match one source user against a target workspace.

    Order of signals: e-mail lookup first (strongest), then a unique
    normalized-display-name + real-name match against the directory, then a
    unique real-name-only match. Returns ``(target_user_id, method)`` where
    method is one of ``"email"``, ``"name"``, ``"none"``.
    """
    email = source_profile.get("email")
    if email:
        try:
            matched = _lookup_user_by_email(target_client, email)
        except SlackApiError as exc:
            _logger.debug(f"match_user: email lookup failed for {email}: {exc}")
        else:
            if matched:
                return matched, "email"

    _refresh_user_directory(target_client, target_workspace_id)

    real = source_profile.get("real_name", "")
    display = source_profile.get("display_name", "")
    normalized = _normalize_name(display or real)
    if not normalized:
        return None, "none"

    candidates = DbManager.find_records(
        schemas.UserDirectory,
        [
            schemas.UserDirectory.workspace_id == target_workspace_id,
            schemas.UserDirectory.deleted_at.is_(None),
        ],
    )

    strict = [
        c
        for c in candidates
        if c.normalized_name
        and real
        and c.real_name
        and c.normalized_name.lower() == normalized.lower()
        and c.real_name.lower() == real.lower()
    ]
    if len(strict) == 1:
        return strict[0].slack_user_id, "name"

    if real:
        relaxed = [c for c in candidates if c.real_name and c.real_name.lower() == real.lower()]
        if len(relaxed) == 1:
            return relaxed[0].slack_user_id, "name"

    return None, "none"


def _get_source_profile_full(client: WebClient, user_id: str) -> dict[str, Any] | None:
    """Fetch the full profile fields (display/real name, email) needed for matching; cached."""
    cache_key = f"user_profile_full:{user_id}"
    hit = _cache_get(cache_key)
    if hit is not None:
        return hit

    try:
        res = _users_info(client, user_id)
    except SlackApiError as exc:
        _logger.warning(f"Failed to look up user {user_id}: {exc}")
        return None

    profile = safe_get(res, "user", "profile") or {}
    full: dict[str, Any] = {
        "display_name": profile.get("display_name") or "",
        "real_name": profile.get("real_name") or "",
        "email": profile.get("email"),
    }
    _cache_set(cache_key, full, ttl=_USER_INFO_CACHE_TTL)
    return full


def get_mapped_target_user_id(
    source_user_id: str,
    source_workspace_id: int,
    target_workspace_id: int,
) -> str | None:
    """Return the mapped target-workspace user ID, or None when unmapped."""
    rows = DbManager.find_records(
        schemas.UserMapping,
        [
            schemas.UserMapping.source_workspace_id == source_workspace_id,
            schemas.UserMapping.source_user_id == source_user_id,
            schemas.UserMapping.target_workspace_id == target_workspace_id,
            schemas.UserMapping.target_user_id.isnot(None),
            schemas.UserMapping.match_method != "none",
        ],
    )
    return rows[0].target_user_id if rows else None
"""Return (display_name, icon_url) to use when syncing a message into the target workspace. + + If the source user is mapped to a user in the target workspace, returns that + local user's display name and profile image so the synced message appears + under the name familiar to users in the target workspace. Otherwise + returns the source display name and icon. Display names are normalized + (text in parens or after a dash at the end is dropped); the app then + appends the remote workspace name in parens when posting. + """ + mapped_id = get_mapped_target_user_id(source_user_id, source_workspace_id, target_workspace_id) + if mapped_id: + local_name, local_icon = get_user_info(target_client, mapped_id) + if local_name: + return normalize_display_name(local_name), local_icon or source_icon_url + return normalize_display_name(source_display_name), source_icon_url + + +def resolve_mention_for_workspace( + source_client: WebClient, + source_user_id: str, + source_workspace_id: int, + target_client: WebClient, + target_workspace_id: int, +) -> str: + """Resolve a single @mention from source workspace to target workspace.""" + source_ws = get_workspace_by_id(source_workspace_id) + source_ws_name = resolve_workspace_name(source_ws) if source_ws else None + + def _unmapped_label(name: str) -> str: + if source_ws_name: + return f"[{name} ({source_ws_name})]" + return f"[{name}]" + + mappings = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == source_workspace_id, + schemas.UserMapping.source_user_id == source_user_id, + schemas.UserMapping.target_workspace_id == target_workspace_id, + ], + ) + + if mappings and _is_mapping_fresh(mappings[0]): + mapping = mappings[0] + if mapping.target_user_id: + return f"<@{mapping.target_user_id}>" + return _unmapped_label(mapping.source_display_name or source_user_id) + + source_profile = _get_source_profile_full(source_client, source_user_id) + if not source_profile: + return 
_MAX_MENTIONS = 50


def parse_mentioned_users(msg_text: str, client: "WebClient") -> list[dict[str, Any]]:
    """Extract up to ``_MAX_MENTIONS`` mentioned user IDs from *msg_text* and resolve profiles.

    Each entry is ``{"user_id", "user_name", "email"}``; when a profile lookup
    fails the user ID itself is used as the name and email is None.
    """
    mention_ids = re.findall(r"<@(\w+)>", msg_text or "")[:_MAX_MENTIONS]

    resolved: list[dict[str, Any]] = []
    for uid in mention_ids:
        profile = _get_user_profile(client, uid)
        resolved.append(
            {"user_id": uid, **profile} if profile else {"user_id": uid, "user_name": uid, "email": None}
        )
    return resolved
_logger = logging.getLogger(__name__)


def apply_mentioned_users(
    msg_text: str,
    source_client: "WebClient",
    target_client: "WebClient",
    mentioned_user_info: list[dict[str, Any]],
    source_workspace_id: int,
    target_workspace_id: int,
) -> str:
    """Re-map ``<@U…>`` mentions from the source workspace into the target workspace.

    *mentioned_user_info* is the (possibly truncated) list produced by
    ``parse_mentioned_users``; each resolved mention replaces the corresponding
    ``<@…>`` token in order. Mentions beyond the resolved list are left as-is.
    """
    msg_text = msg_text or ""
    if not mentioned_user_info:
        return msg_text

    replacements: list[str] = []
    for user_info in mentioned_user_info:
        uid = user_info.get("user_id", "")
        try:
            replacements.append(
                resolve_mention_for_workspace(
                    source_client=source_client,
                    source_user_id=uid,
                    source_workspace_id=source_workspace_id,
                    target_client=target_client,
                    target_workspace_id=target_workspace_id,
                )
            )
        except Exception as exc:
            # Best-effort: fall back to a plain-text label on any resolution failure.
            _logger.error(f"Failed to resolve mention for user {uid}: {exc}")
            fallback = user_info.get("user_name") or uid
            source_ws = get_workspace_by_id(source_workspace_id) if source_workspace_id else None
            ws_label = resolve_workspace_name(source_ws) if source_ws else None
            replacements.append(f"[{fallback} ({ws_label})]" if ws_label else f"[{fallback}]")

    replace_iter = iter(replacements)
    # BUG FIX: the message can contain more <@…> tokens than resolved entries
    # (parse_mentioned_users caps at _MAX_MENTIONS, or the caller may pass a
    # shorter list). Previously next() raised StopIteration inside re.sub;
    # now unmatched mentions are left untouched.
    return re.sub(r"<@\w+>", lambda m: next(replace_iter, m.group(0)), msg_text)


def resolve_channel_references(
    msg_text: str,
    source_client: "WebClient",
    source_workspace: "schemas.Workspace | None" = None,
) -> str:
    """Replace ``<#CHANNEL_ID>`` tokens with deep-linked, workspace-labelled channel names.

    Falls back to a plain ``#name`` label when no team ID is known, and to
    ``#CHANNEL_ID`` when the channel lookup fails.
    """
    if not msg_text:
        return msg_text

    pattern = re.compile(r"<#(C[A-Z0-9]+)(?:\|[^>]*)?>")
    channel_ids = pattern.findall(msg_text)
    if not channel_ids:
        return msg_text

    team_id = getattr(source_workspace, "team_id", None) if source_workspace else None
    ws_name = resolve_workspace_name(source_workspace) if source_workspace else None

    for ch_id in set(channel_ids):
        try:
            info = source_client.conversations_info(channel=ch_id)
            ch_name = safe_get(info, "channel", "name") or ch_id
        except Exception:
            ch_name = ch_id  # lookup failed — keep the raw ID

        if ch_name != ch_id:
            label = f"#{ch_name} ({ws_name})" if ws_name else f"#{ch_name}"
            if team_id:
                deep_link = f"https://slack.com/app_redirect?channel={ch_id}&team={team_id}"
                replacement = f"<{deep_link}|{label}>"
            else:
                replacement = label
        else:
            replacement = f"#{ch_id}"

        # Bind ch_id/replacement as defaults so each sub() only touches its own channel.
        msg_text = pattern.sub(
            lambda m, _cid=ch_id, _rep=replacement: _rep if m.group(1) == _cid else m.group(0),
            msg_text,
        )

    return msg_text
def seed_user_mappings(source_workspace_id: int, target_workspace_id: int, group_id: int | None = None) -> int:
    """Create stub UserMapping rows for every active user in the source directory.

    Existing mappings are kept (their stored display name is refreshed when it
    drifted). Returns the number of newly created stubs.
    """
    directory = DbManager.find_records(
        schemas.UserDirectory,
        [
            schemas.UserDirectory.workspace_id == source_workspace_id,
            schemas.UserDirectory.deleted_at.is_(None),
        ],
    )
    existing_by_uid = {
        m.source_user_id: m
        for m in DbManager.find_records(
            schemas.UserMapping,
            [
                schemas.UserMapping.source_workspace_id == source_workspace_id,
                schemas.UserMapping.target_workspace_id == target_workspace_id,
            ],
        )
    }

    now = datetime.now(UTC)
    created = 0
    for entry in directory:
        current_name = entry.display_name or entry.real_name
        known = existing_by_uid.get(entry.slack_user_id)
        if known is not None:
            if known.source_display_name != current_name:
                DbManager.update_records(
                    schemas.UserMapping,
                    [schemas.UserMapping.id == known.id],
                    {schemas.UserMapping.source_display_name: current_name},
                )
            continue
        DbManager.create_record(
            schemas.UserMapping(
                source_workspace_id=source_workspace_id,
                source_user_id=entry.slack_user_id,
                target_workspace_id=target_workspace_id,
                target_user_id=None,
                match_method="none",
                source_display_name=current_name,
                matched_at=now,
                group_id=group_id,
            )
        )
        created += 1

    return created
def run_auto_match_for_workspace(target_client: WebClient, target_workspace_id: int) -> tuple[int, int]:
    """Re-run auto-matching for all still-unmatched mappings targeting a workspace.

    Returns ``(newly_matched, still_unmatched)`` counts.
    """
    pending = DbManager.find_records(
        schemas.UserMapping,
        [
            schemas.UserMapping.target_workspace_id == target_workspace_id,
            schemas.UserMapping.match_method == "none",
        ],
    )

    _refresh_user_directory(target_client, target_workspace_id)

    newly_matched = 0
    still_unmatched = 0
    for mapping in pending:
        source_workspace = get_workspace_by_id(mapping.source_workspace_id)
        if not source_workspace:
            still_unmatched += 1
            continue

        source_client = WebClient(token=decrypt_bot_token(source_workspace.bot_token))
        profile = _get_source_profile_full(source_client, mapping.source_user_id)
        if not profile:
            still_unmatched += 1
            continue

        target_uid, method = _find_user_match(
            mapping.source_user_id, profile, target_client, target_workspace_id
        )
        if not target_uid:
            still_unmatched += 1
            continue

        DbManager.update_records(
            schemas.UserMapping,
            [schemas.UserMapping.id == mapping.id],
            {
                schemas.UserMapping.target_user_id: target_uid,
                schemas.UserMapping.match_method: method,
                schemas.UserMapping.source_display_name: profile.get("display_name")
                or profile.get("real_name")
                or mapping.source_user_id,
                schemas.UserMapping.matched_at: datetime.now(UTC),
            },
        )
        newly_matched += 1

    return newly_matched, still_unmatched


# --- syncbot/helpers/workspace.py module header (preserved from this span) ---
"""Workspace record management and name resolution."""

import logging

from slack_sdk import WebClient

from db import DbManager, schemas
from helpers._cache import _cache_get, _cache_set
from helpers.core import safe_get
from helpers.encryption import decrypt_bot_token, encrypt_bot_token

_logger = logging.getLogger(__name__)
def get_sync_list(team_id: str, channel_id: str) -> "list[tuple[schemas.SyncChannel, schemas.Workspace]]":
    """Return every active (SyncChannel, Workspace) pair sharing a sync with *channel_id* (cached)."""
    cache_key = f"sync_list:{channel_id}"
    cached = _cache_get(cache_key)
    if cached is not None:
        return cached

    mine = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.channel_id == channel_id,
            schemas.SyncChannel.deleted_at.is_(None),
            schemas.SyncChannel.status == "active",
        ],
    )
    if mine:
        pairs = DbManager.find_join_records2(
            left_cls=schemas.SyncChannel,
            right_cls=schemas.Workspace,
            filters=[
                schemas.SyncChannel.sync_id == mine[0].sync_id,
                schemas.SyncChannel.deleted_at.is_(None),
                schemas.SyncChannel.status == "active",
            ],
        )
    else:
        pairs = []

    _cache_set(cache_key, pairs)
    return pairs


def get_federated_workspace(group_id: int, workspace_id: int) -> "schemas.FederatedWorkspace | None":
    """Return the active federated workspace bound to a group membership, if any."""
    members = DbManager.find_records(
        schemas.WorkspaceGroupMember,
        [
            schemas.WorkspaceGroupMember.group_id == group_id,
            schemas.WorkspaceGroupMember.workspace_id == workspace_id,
            schemas.WorkspaceGroupMember.deleted_at.is_(None),
        ],
    )
    if not members or not members[0].federated_workspace_id:
        return None

    fed_ws = DbManager.get_record(schemas.FederatedWorkspace, id=members[0].federated_workspace_id)
    return fed_ws if fed_ws and fed_ws.status == "active" else None


def get_federated_workspace_for_sync(sync_id: int) -> "schemas.FederatedWorkspace | None":
    """Return the active federated workspace for a sync, via its group membership."""
    sync = DbManager.get_record(schemas.Sync, id=sync_id)
    if not sync or not sync.group_id:
        return None

    fed_members = DbManager.find_records(
        schemas.WorkspaceGroupMember,
        [
            schemas.WorkspaceGroupMember.group_id == sync.group_id,
            schemas.WorkspaceGroupMember.federated_workspace_id.isnot(None),
            schemas.WorkspaceGroupMember.deleted_at.is_(None),
            schemas.WorkspaceGroupMember.status == "active",
        ],
    )
    if not fed_members:
        return None

    fed_ws = DbManager.get_record(schemas.FederatedWorkspace, id=fed_members[0].federated_workspace_id)
    return fed_ws if fed_ws and fed_ws.status == "active" else None
def get_workspace_record(team_id: str, body: dict, context: dict, client: WebClient) -> schemas.Workspace:
    """Fetch, create, or restore the Workspace row for a Slack team.

    On first contact the workspace name comes from ``team.info`` (falling back
    to the event's team domain); soft-deleted rows are restored; otherwise the
    stored bot token and workspace name are opportunistically refreshed.
    """
    record: schemas.Workspace = DbManager.get_record(schemas.Workspace, id=team_id)

    if not record:
        team_domain = safe_get(body, "team", "domain")
        try:
            ws_name = client.team_info()["team"]["name"]
        except Exception as exc:
            # Best-effort: team.info may be unavailable; the event's domain will do.
            _logger.debug(f"get_workspace: team_info failed, falling back to domain: {exc}")
            ws_name = team_domain
        return DbManager.create_record(
            schemas.Workspace(
                team_id=team_id,
                workspace_name=ws_name,
                bot_token=encrypt_bot_token(context["bot_token"]),
            )
        )

    if record.deleted_at is not None:
        return _restore_workspace(record, context, client)

    _maybe_refresh_bot_token(record, context)
    _maybe_refresh_workspace_name(record, client)
    return record


def _maybe_refresh_bot_token(workspace_record: schemas.Workspace, context: dict) -> None:
    """Persist a newer bot token from the OAuth context, if it changed."""
    incoming = safe_get(context, "bot_token")
    if not incoming:
        return

    # NOTE(review): assumes encrypt_bot_token is deterministic; if it uses a
    # random nonce this comparison always differs and we rewrite every time —
    # confirm against helpers/encryption.py.
    encrypted = encrypt_bot_token(incoming)
    if encrypted == workspace_record.bot_token:
        return

    DbManager.update_records(
        schemas.Workspace,
        [schemas.Workspace.id == workspace_record.id],
        {schemas.Workspace.bot_token: encrypted},
    )
    workspace_record.bot_token = encrypted
    _logger.info(
        "bot_token_refreshed",
        extra={"workspace_id": workspace_record.id, "team_id": workspace_record.team_id},
    )


def _maybe_refresh_workspace_name(workspace_record: schemas.Workspace, client: WebClient) -> None:
    """Refresh the stored workspace name from ``team.info``, at most once per day.

    The throttle key is set before the API call, so a failed call also counts
    toward the daily limit (keeps API usage bounded).
    """
    throttle_key = f"ws_name_refresh:{workspace_record.id}"
    if _cache_get(throttle_key):
        return
    _cache_set(throttle_key, True, ttl=86400)

    try:
        latest_name = client.team_info()["team"]["name"]
    except Exception as exc:
        _logger.debug(f"_maybe_refresh_workspace_name: team_info call failed: {exc}")
        return

    if not latest_name or latest_name == workspace_record.workspace_name:
        return

    DbManager.update_records(
        schemas.Workspace,
        [schemas.Workspace.id == workspace_record.id],
        {schemas.Workspace.workspace_name: latest_name},
    )
    workspace_record.workspace_name = latest_name
    _logger.info(
        "workspace_name_refreshed",
        extra={"workspace_id": workspace_record.id, "new_name": latest_name},
    )
ttl=86400) + + try: + team_info = client.team_info() + current_name = team_info["team"]["name"] + except Exception as exc: + _logger.debug(f"_maybe_refresh_workspace_name: team_info call failed: {exc}") + return + + if current_name and current_name != workspace_record.workspace_name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + {schemas.Workspace.workspace_name: current_name}, + ) + workspace_record.workspace_name = current_name + _logger.info( + "workspace_name_refreshed", + extra={"workspace_id": workspace_record.id, "new_name": current_name}, + ) + + +def _restore_workspace( + workspace_record: schemas.Workspace, + context: dict, + client: WebClient, +) -> schemas.Workspace: + """Restore a soft-deleted workspace and notify group members.""" + from helpers.notifications import notify_admins_dm, notify_synced_channels + + ws_name = resolve_workspace_name(workspace_record) + + new_token = safe_get(context, "bot_token") + update_fields = {schemas.Workspace.deleted_at: None} + if new_token: + update_fields[schemas.Workspace.bot_token] = encrypt_bot_token(new_token) + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + update_fields, + ) + + workspace_record = DbManager.get_record(schemas.Workspace, id=workspace_record.team_id) + + soft_deleted_memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.isnot(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + + restored_group_ids: set[int] = set() + for membership in soft_deleted_memberships: + group = DbManager.get_record(schemas.WorkspaceGroup, id=membership.group_id) + if not group or group.status != "active": + continue + + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == membership.id], + 
{schemas.WorkspaceGroupMember.deleted_at: None}, + ) + restored_group_ids.add(membership.group_id) + + if restored_group_ids: + my_soft_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.isnot(None), + ], + ) + for ch in my_soft_channels: + sync = DbManager.get_record(schemas.Sync, id=ch.sync_id) + if sync and sync.group_id in restored_group_ids: + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == ch.id], + {schemas.SyncChannel.deleted_at: None, schemas.SyncChannel.status: "active"}, + ) + + notified_ws: set[int] = set() + for group_id in restored_group_ids: + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + for m in members: + if not m.workspace_id or m.workspace_id in notified_ws: + continue + partner = get_workspace_by_id(m.workspace_id) + if not partner or not partner.bot_token or partner.deleted_at is not None: + continue + notified_ws.add(m.workspace_id) + try: + partner_client = WebClient(token=decrypt_bot_token(partner.bot_token)) + notify_admins_dm( + partner_client, + f":arrow_forward: *{ws_name}* has been restored. 
Group syncing will resume.", + ) + + syncs_in_group = DbManager.find_records( + schemas.Sync, [schemas.Sync.group_id == group_id], + ) + partner_ch_ids = [] + for sync in syncs_in_group: + partner_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync.id, + schemas.SyncChannel.workspace_id == m.workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for sc in partner_channels: + partner_ch_ids.append(sc.channel_id) + if partner_ch_ids: + notify_synced_channels( + partner_client, + partner_ch_ids, + f":arrow_forward: Syncing with *{ws_name}* has been resumed.", + ) + except Exception as e: + _logger.warning(f"_restore_workspace: failed to notify partner {m.workspace_id}: {e}") + + _logger.info( + "workspace_restored", + extra={ + "workspace_id": workspace_record.id, + "groups_restored": len(restored_group_ids), + }, + ) + + return workspace_record + + +def get_workspace_by_id(workspace_id: int, context: dict | None = None) -> schemas.Workspace | None: + """Look up a workspace by its integer primary-key ``id`` column. + + If *context* is provided, uses request-scoped cache to avoid repeated DB + lookups for the same workspace_id within one request. 
+ """ + if context is not None: + cache = context.setdefault("_workspace_by_id", {}) + if workspace_id in cache: + return cache[workspace_id] + rows = DbManager.find_records(schemas.Workspace, [schemas.Workspace.id == workspace_id]) + result = rows[0] if rows else None + if context is not None: + context.setdefault("_workspace_by_id", {})[workspace_id] = result + return result + + +def get_groups_for_workspace(workspace_id: int) -> list[schemas.WorkspaceGroup]: + """Return all active groups the workspace belongs to.""" + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + groups: list[schemas.WorkspaceGroup] = [] + for m in members: + g = DbManager.get_record(schemas.WorkspaceGroup, id=m.group_id) + if g and g.status == "active": + groups.append(g) + return groups + + +def get_group_members(group_id: int) -> list[schemas.WorkspaceGroupMember]: + """Return all active members of a group.""" + return DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + +def resolve_workspace_name(workspace: schemas.Workspace) -> str: + """Return a human-readable name for a workspace.""" + if workspace.workspace_name: + return workspace.workspace_name + + if workspace.bot_token: + try: + ws_client = WebClient(token=decrypt_bot_token(workspace.bot_token)) + team_info = ws_client.team_info() + name = safe_get(team_info, "team", "name") + if name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace.id], + {schemas.Workspace.workspace_name: name}, + ) + workspace.workspace_name = name + return name + except Exception: + pass + + return workspace.team_id or f"Workspace {workspace.id}" + + +def 
resolve_channel_name(channel_id: str, workspace=None) -> str: + """Resolve a channel ID to a human-readable name.""" + if not channel_id: + return channel_id + + cache_key = f"chan_name:{channel_id}" + cached = _cache_get(cache_key) + if cached: + return cached + + ch_name = channel_id + ws_name = None + + if workspace and hasattr(workspace, "bot_token") and workspace.bot_token: + ws_name = getattr(workspace, "workspace_name", None) + try: + ws_client = WebClient(token=decrypt_bot_token(workspace.bot_token)) + info = ws_client.conversations_info(channel=channel_id) + ch_name = safe_get(info, "channel", "name") or channel_id + except Exception as exc: + _logger.debug(f"resolve_channel_name: conversations_info failed for {channel_id}: {exc}") + + if ws_name: + result = f"#{ch_name} ({ws_name})" + else: + result = f"#{ch_name}" + + if ch_name != channel_id: + _cache_set(cache_key, result, ttl=3600) + return result diff --git a/syncbot/logger.py b/syncbot/logger.py new file mode 100644 index 0000000..97a1f5b --- /dev/null +++ b/syncbot/logger.py @@ -0,0 +1,252 @@ +"""Structured logging and observability helpers. + +Provides: + +* **Structured JSON formatter** — Every log entry is emitted as a single + JSON object with consistent fields (``timestamp``, ``level``, + ``correlation_id``, ``module``, ``message``). This makes CloudWatch + Logs Insights queries fast and reliable. +* **Correlation IDs** — A unique ``correlation_id`` is generated at the + start of each incoming Slack request and automatically included in + every log line emitted during that request. +* **Metrics helpers** — Lightweight functions that emit metric events as + structured log entries. CloudWatch Logs Insights or a metric filter + can aggregate these into numeric dashboards. 
+ +Usage:: + + from logger import configure_logging, set_correlation_id, emit_metric + + configure_logging() # call once at module level + set_correlation_id() # call at the start of each request + emit_metric("messages_synced", 3, sync_id="abc") +""" + +import json +import logging +import time as _time +import uuid +from datetime import UTC +from typing import Any + +# --------------------------------------------------------------------------- +# Correlation-ID storage (thread-local not needed — Lambda is single-thread) +# --------------------------------------------------------------------------- + +_correlation_id: str | None = None +_request_start: float | None = None + + +def set_correlation_id(value: str | None = None) -> str: + """Set and return a correlation ID for the current request. + + If *value* is ``None`` a new UUID-4 is generated. Also resets the + internal request-start timer used by :func:`get_request_duration_ms`. + """ + global _correlation_id, _request_start + _correlation_id = value or uuid.uuid4().hex[:12] + _request_start = _time.monotonic() + return _correlation_id + + +def get_correlation_id() -> str: + """Return the current correlation ID, or ``"none"`` if unset.""" + return _correlation_id or "none" + + +def get_request_duration_ms() -> float: + """Milliseconds elapsed since :func:`set_correlation_id` was called.""" + if _request_start is None: + return 0.0 + return (_time.monotonic() - _request_start) * 1000 + + +# --------------------------------------------------------------------------- +# Structured JSON formatter +# --------------------------------------------------------------------------- + + +class StructuredFormatter(logging.Formatter): + """Emit each log record as a single-line JSON object. + + Fields included in every entry: + + * ``timestamp`` — ISO-8601 UTC + * ``level`` — e.g. 
INFO, WARNING, ERROR + * ``correlation_id`` — request-scoped ID set by :func:`set_correlation_id` + * ``module`` — Python module that emitted the log + * ``function`` — function name + * ``message`` — the formatted log message + + Extra keys passed via ``logging.info("msg", extra={...})`` are merged + into the top-level JSON object. + """ + + # Keys that belong to the stdlib LogRecord and should not be forwarded. + _RESERVED = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__.keys()) + + def format(self, record: logging.LogRecord) -> str: + entry: dict[str, Any] = { + "timestamp": self.formatTime(record, datefmt="%Y-%m-%dT%H:%M:%S.%fZ"), + "level": record.levelname, + "correlation_id": get_correlation_id(), + "module": record.module, + "function": record.funcName, + "message": record.getMessage(), + } + + if record.exc_info and record.exc_info[1]: + entry["exception"] = self.formatException(record.exc_info) + + # Merge any extra fields the caller passed. + for key, val in record.__dict__.items(): + if key not in self._RESERVED and key not in entry: + entry[key] = val + + return json.dumps(entry, default=str) + + def formatTime(self, record, datefmt=None): # noqa: N802 — override + from datetime import datetime + + dt = datetime.fromtimestamp(record.created, tz=UTC) + if datefmt: + return dt.strftime(datefmt) + return dt.isoformat() + + +class DevFormatter(logging.Formatter): + """Human-readable colorized formatter for local development. + + Outputs logs like:: + + 17:14:05 INFO [app.main_response] (9dab20ac) request_received + request_type=event_callback request_id=app_home_opened + + 17:14:06 ERROR [listener_error_handler.handle] (9dab20ac) Something broke + Traceback (most recent call last): + ... 
+ """ + + _RESERVED = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__.keys()) + + _COLORS = { + "DEBUG": "\033[90m", # grey + "INFO": "\033[32m", # green + "WARNING": "\033[33m", # yellow + "ERROR": "\033[31m", # red + "CRITICAL": "\033[1;31m", # bold red + } + _RESET = "\033[0m" + _DIM = "\033[90m" + + def format(self, record: logging.LogRecord) -> str: + from datetime import datetime + + dt = datetime.fromtimestamp(record.created, tz=UTC) + time_str = dt.strftime("%H:%M:%S") + + color = self._COLORS.get(record.levelname, "") + level = f"{color}{record.levelname:<5}{self._RESET}" + + corr = get_correlation_id() + corr_str = f" {self._DIM}({corr}){self._RESET}" if corr != "none" else "" + + location = f"{record.module}.{record.funcName}" + msg = record.getMessage() + + line = f"{self._DIM}{time_str}{self._RESET} {level} [{location}]{corr_str} {msg}" + + extras = {} + for key, val in record.__dict__.items(): + if key not in self._RESERVED and key not in ("message", "correlation_id"): + extras[key] = val + + if extras: + pairs = " ".join(f"{k}={v}" for k, v in extras.items()) + line += f"\n{' ' * 15}{self._DIM}{pairs}{self._RESET}" + + if record.exc_info and record.exc_info[1]: + exc_text = self.formatException(record.exc_info) + indented = "\n".join(f"{' ' * 15}{line_}" for line_ in exc_text.splitlines()) + line += f"\n{indented}" + + return line + + +# --------------------------------------------------------------------------- +# One-time logging configuration +# --------------------------------------------------------------------------- + +_configured = False + + +def configure_logging(level: int = logging.INFO) -> None: + """Replace the root logger's handlers with a single structured-JSON handler. + + Uses :class:`DevFormatter` (human-readable, colorized) when + ``LOCAL_DEVELOPMENT`` is enabled, otherwise :class:`StructuredFormatter` + (single-line JSON for CloudWatch). + + Safe to call multiple times — subsequent calls are no-ops. 
+ """ + import os + + global _configured + if _configured: + return + _configured = True + + root = logging.getLogger() + root.setLevel(level) + + # Remove any existing handlers (e.g. Slack Bolt's defaults). + for h in list(root.handlers): + root.removeHandler(h) + + local_dev = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" + + handler = logging.StreamHandler() + handler.setFormatter(DevFormatter() if local_dev else StructuredFormatter()) + root.addHandler(handler) + + +# --------------------------------------------------------------------------- +# Metric-event helper +# --------------------------------------------------------------------------- + +_metrics_logger = logging.getLogger("syncbot.metrics") + + +def emit_metric( + metric_name: str, + value: float = 1, + unit: str = "Count", + **dimensions: Any, +) -> None: + """Emit a metric as a structured log entry. + + CloudWatch Logs Insights can aggregate these with queries like:: + + filter metric_name = "messages_synced" + | stats sum(metric_value) as total by bin(5m) + + Parameters + ---------- + metric_name: + Short snake_case identifier, e.g. ``messages_synced``. + value: + Numeric value (default ``1`` for counter-style metrics). + unit: + CloudWatch-compatible unit string (``Count``, ``Milliseconds``, …). + **dimensions: + Arbitrary key/value pairs attached to the metric event. 
+ """ + _metrics_logger.info( + metric_name, + extra={ + "metric_name": metric_name, + "metric_value": value, + "metric_unit": unit, + **dimensions, + }, + ) diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index d794c55..ccfd8d3 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,15 +1,16 @@ -certifi==2023.7.22 ; python_version >= "3.11" and python_version < "4.0" -cffi==1.16.0 ; python_version >= "3.11" and python_version < "4.0" -charset-normalizer==3.3.0 ; python_version >= "3.11" and python_version < "4.0" -cryptography==41.0.4 ; python_version >= "3.11" and python_version < "4.0" -greenlet==3.0.0 ; python_version >= "3.11" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" -idna==3.4 ; python_version >= "3.11" and python_version < "4.0" -pillow-heif==0.16.0 ; python_version >= "3.11" and python_version < "4.0" -pillow==10.3.0 ; python_version >= "3.11" and python_version < "4.0" -pycparser==2.21 ; python_version >= "3.11" and python_version < "4.0" -pymysql==1.1.0 ; python_version >= "3.11" and python_version < "4.0" -requests==2.31.0 ; python_version >= "3.11" and python_version < "4.0" -slack-bolt==1.18.0 ; python_version >= "3.11" and python_version < "4.0" -slack-sdk==3.23.0 ; python_version >= "3.11" and python_version < "4.0" -sqlalchemy==1.4.49 ; python_version >= "3.11" and python_version < "4.0" -urllib3==1.26.17 ; python_version >= "3.11" and python_version < "4.0" +certifi==2026.1.4 ; python_version >= "3.11" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.11" and python_version < "4.0" +charset-normalizer==3.4.4 ; python_version >= "3.11" and python_version < "4.0" +cryptography==46.0.5 ; python_version >= "3.11" and python_version < "4.0" +greenlet==3.1.1 ; python_version >= "3.11" and 
(platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" +idna==3.11 ; python_version >= "3.11" and python_version < "4.0" +pillow-heif==1.2.0 ; python_version >= "3.11" and python_version < "4.0" +pillow==12.1.1 ; python_version >= "3.11" and python_version < "4.0" +pycparser==2.23 ; python_version >= "3.11" and python_version < "4.0" +pymysql==1.1.2 ; python_version >= "3.11" and python_version < "4.0" +python-dotenv==1.2.1 ; python_version >= "3.11" and python_version < "4.0" +requests==2.32.5 ; python_version >= "3.11" and python_version < "4.0" +slack-bolt==1.27.0 ; python_version >= "3.11" and python_version < "4.0" +slack-sdk==3.40.0 ; python_version >= "3.11" and python_version < "4.0" +sqlalchemy==1.4.54 ; python_version >= "3.11" and python_version < "4.0" +urllib3==2.6.3 ; python_version >= "3.11" and python_version < "4.0" diff --git a/syncbot/routing.py b/syncbot/routing.py new file mode 100644 index 0000000..c320b93 --- /dev/null +++ b/syncbot/routing.py @@ -0,0 +1,86 @@ +"""Request routing tables. + +Maps incoming Slack request types to handler functions. The +:data:`MAIN_MAPPER` is a two-level dict keyed first by request category +(``block_actions``, ``event_callback``, ``view_submission``) and then by +the specific identifier (action ID, event type, or callback ID). + +:func:`~app.main_response` uses these tables to dispatch every request. 
+""" + +import builders +import handlers +from slack import actions + +ACTION_MAPPER = { + actions.CONFIG_JOIN_EXISTING_SYNC: builders.build_join_sync_form, + actions.CONFIG_CREATE_NEW_SYNC: builders.build_new_sync_form, + actions.CONFIG_REMOVE_SYNC: handlers.handle_remove_sync, + actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: handlers.check_join_sync_channel, + actions.CONFIG_MANAGE_USER_MATCHING: builders.build_user_matching_entry, + actions.CONFIG_USER_MAPPING_BACK: handlers.handle_user_mapping_back, + actions.CONFIG_USER_MAPPING_EDIT: builders.build_user_mapping_edit_modal, + actions.CONFIG_USER_MAPPING_REFRESH: handlers.handle_user_mapping_refresh, + actions.CONFIG_CREATE_GROUP: handlers.handle_create_group, + actions.CONFIG_JOIN_GROUP: handlers.handle_join_group, + actions.CONFIG_INVITE_WORKSPACE: handlers.handle_invite_workspace, + actions.CONFIG_LEAVE_GROUP: handlers.handle_leave_group, + actions.CONFIG_ACCEPT_GROUP_REQUEST: handlers.handle_accept_group_invite, + actions.CONFIG_DECLINE_GROUP_REQUEST: handlers.handle_decline_group_invite, + actions.CONFIG_CANCEL_GROUP_REQUEST: handlers.handle_decline_group_invite, + actions.CONFIG_PUBLISH_CHANNEL: handlers.handle_publish_channel, + actions.CONFIG_UNPUBLISH_CHANNEL: handlers.handle_unpublish_channel, + actions.CONFIG_PAUSE_SYNC: handlers.handle_pause_sync, + actions.CONFIG_RESUME_SYNC: handlers.handle_resume_sync, + actions.CONFIG_STOP_SYNC: handlers.handle_stop_sync, + actions.CONFIG_SUBSCRIBE_CHANNEL: handlers.handle_subscribe_channel, + actions.CONFIG_REFRESH_HOME: handlers.handle_refresh_home, + actions.CONFIG_BACKUP_RESTORE: handlers.handle_backup_restore, + actions.CONFIG_BACKUP_DOWNLOAD: handlers.handle_backup_download, + actions.CONFIG_DATA_MIGRATION: handlers.handle_data_migration, + actions.CONFIG_DATA_MIGRATION_EXPORT: handlers.handle_data_migration_export, + actions.CONFIG_GENERATE_FEDERATION_CODE: handlers.handle_generate_federation_code, + actions.CONFIG_ENTER_FEDERATION_CODE: 
handlers.handle_enter_federation_code, + actions.CONFIG_REMOVE_FEDERATION_CONNECTION: handlers.handle_remove_federation_connection, +} +"""Block-action ``action_id`` -> handler.""" + +EVENT_MAPPER = { + "app_home_opened": handlers.handle_app_home_opened, + "member_joined_channel": handlers.handle_member_joined_channel, + "message": handlers.respond_to_message_event, + "reaction_added": handlers._handle_reaction, + "reaction_removed": handlers._handle_reaction, + "team_join": handlers.handle_team_join, + "tokens_revoked": handlers.handle_tokens_revoked, + "user_profile_changed": handlers.handle_user_profile_changed, +} +"""Event ``type`` -> handler.""" + +VIEW_MAPPER = { + actions.CONFIG_JOIN_SYNC_SUMBIT: handlers.handle_join_sync_submission, + actions.CONFIG_NEW_SYNC_SUBMIT: handlers.handle_new_sync_submission, + actions.CONFIG_USER_MAPPING_EDIT_SUBMIT: handlers.handle_user_mapping_edit_submit, + actions.CONFIG_CREATE_GROUP_SUBMIT: handlers.handle_create_group_submit, + actions.CONFIG_JOIN_GROUP_SUBMIT: handlers.handle_join_group_submit, + actions.CONFIG_INVITE_WORKSPACE_SUBMIT: handlers.handle_invite_workspace_submit, + actions.CONFIG_LEAVE_GROUP_CONFIRM: handlers.handle_leave_group_confirm, + actions.CONFIG_PUBLISH_MODE_SUBMIT: handlers.handle_publish_mode_submit, + actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit, + actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT: handlers.handle_subscribe_channel_submit, + actions.CONFIG_STOP_SYNC_CONFIRM: handlers.handle_stop_sync_confirm, + actions.CONFIG_FEDERATION_CODE_SUBMIT: handlers.handle_federation_code_submit, + actions.CONFIG_FEDERATION_LABEL_SUBMIT: handlers.handle_federation_label_submit, + actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit, + actions.CONFIG_BACKUP_RESTORE_CONFIRM: handlers.handle_backup_restore_confirm_submit, + actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit, + actions.CONFIG_DATA_MIGRATION_CONFIRM: 
handlers.handle_data_migration_confirm_submit, +} +"""View submission ``callback_id`` -> handler.""" + +MAIN_MAPPER = { + "block_actions": ACTION_MAPPER, + "event_callback": EVENT_MAPPER, + "view_submission": VIEW_MAPPER, +} +"""Top-level dispatcher: request category -> sub-mapper.""" diff --git a/syncbot/utils/__init__.py b/syncbot/slack/__init__.py similarity index 100% rename from syncbot/utils/__init__.py rename to syncbot/slack/__init__.py diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py new file mode 100644 index 0000000..286ce47 --- /dev/null +++ b/syncbot/slack/actions.py @@ -0,0 +1,206 @@ +"""Slack Block Kit action ID constants. + +These string constants are used as ``action_id`` / ``callback_id`` values +throughout the UI forms and handler routing tables. Keeping them in one +place avoids typos and makes refactoring easier. +""" + +CONFIG_JOIN_EXISTING_SYNC = "join_existing_sync" +"""Action: user clicked "Join existing Sync" button.""" + +CONFIG_CREATE_NEW_SYNC = "create_new_sync" +"""Action: user clicked "Create new Sync" button.""" + +CONFIG_REMOVE_SYNC = "remove_sync" +"""Action: user clicked "DeSync" button (prefix-matched).""" + +CONFIG_NEW_SYNC_CHANNEL_SELECT = "config_new_sync_channel_select" +"""Input: channel picker in the new-sync form.""" + +CONFIG_NEW_SYNC_SUBMIT = "config_new_sync_submit" +"""Callback: new-sync modal submitted.""" + +CONFIG_JOIN_SYNC_SELECT = "config_join_sync_select" +"""Input: sync selector in the join-sync form.""" + +CONFIG_JOIN_SYNC_CHANNEL_SELECT = "config_join_sync_channel_select" +"""Input: channel selector in the join-sync form (dispatches an action on change).""" + +CONFIG_JOIN_SYNC_SUMBIT = "config_join_sync_submit" +"""Callback: join-sync modal submitted.""" + +# --------------------------------------------------------------------------- +# User Matching actions +# --------------------------------------------------------------------------- + +CONFIG_MANAGE_USER_MATCHING = "manage_user_matching" 
+"""Action: user clicked "User Mapping" button on the Home tab.""" + +CONFIG_USER_MAPPING_BACK = "user_mapping_back" +"""Action: user clicked "Back" on the user mapping screen to return to main Home tab.""" + +CONFIG_USER_MAPPING_EDIT = "user_mapping_edit" +"""Action: user clicked "Edit" on a user row in the mapping screen (prefix-matched with mapping ID).""" + +CONFIG_USER_MAPPING_EDIT_SUBMIT = "user_mapping_edit_submit" +"""Callback: per-user edit mapping modal submitted.""" + +CONFIG_USER_MAPPING_EDIT_SELECT = "user_mapping_edit_select" +"""Input: user picker dropdown in the edit mapping modal.""" + +CONFIG_USER_MAPPING_REFRESH = "user_mapping_refresh" +"""Action: user clicked "Refresh" on the user mapping screen.""" + +# --------------------------------------------------------------------------- +# Workspace Group actions +# --------------------------------------------------------------------------- + +CONFIG_CREATE_GROUP = "create_group" +"""Action: user clicked "Create Group" on the Home tab.""" + +CONFIG_CREATE_GROUP_SUBMIT = "create_group_submit" +"""Callback: create-group modal submitted.""" + +CONFIG_CREATE_GROUP_NAME = "create_group_name" +"""Input: text field for the group name.""" + +CONFIG_JOIN_GROUP = "join_group" +"""Action: user clicked "Join Group" on the Home tab.""" + +CONFIG_JOIN_GROUP_SUBMIT = "join_group_submit" +"""Callback: join-group modal submitted.""" + +CONFIG_JOIN_GROUP_CODE = "join_group_code" +"""Input: text field for the group invite code.""" + +CONFIG_LEAVE_GROUP = "leave_group" +"""Action: user clicked "Leave Group" (prefix-matched with group_id).""" + +CONFIG_LEAVE_GROUP_CONFIRM = "leave_group_confirm" +"""Callback: leave-group confirmation modal submitted.""" + +CONFIG_ACCEPT_GROUP_REQUEST = "accept_group_request" +"""Action: user clicked "Accept" on an incoming group join request (prefix-matched with member_id).""" + +CONFIG_CANCEL_GROUP_REQUEST = "cancel_group_request" +"""Action: user clicked "Cancel Request" on an outgoing 
group join request (prefix-matched with member_id).""" + +CONFIG_INVITE_WORKSPACE = "invite_workspace" +"""Action: user clicked "Invite Workspace" button on a group (value carries group_id).""" + +CONFIG_INVITE_WORKSPACE_SUBMIT = "invite_workspace_submit" +"""Callback: invite-workspace modal submitted (sends DM invite to selected workspace).""" + +CONFIG_INVITE_WORKSPACE_SELECT = "invite_workspace_select" +"""Input: workspace picker dropdown in the invite workspace modal.""" + +CONFIG_DECLINE_GROUP_REQUEST = "decline_group_request" +"""Action: user clicked "Decline" on an incoming group invite DM (prefix-matched with member_id).""" + +# --------------------------------------------------------------------------- +# Channel Sync actions +# --------------------------------------------------------------------------- + +CONFIG_PUBLISH_CHANNEL = "publish_channel" +"""Action: user clicked "Publish Channel" button (value carries group_id).""" + +CONFIG_PUBLISH_CHANNEL_SELECT = "publish_channel_select" +"""Input: channel picker in the publish channel modal.""" + +CONFIG_PUBLISH_CHANNEL_SUBMIT = "publish_channel_submit" +"""Callback: publish channel modal submitted.""" + +CONFIG_PUBLISH_MODE_SUBMIT = "publish_mode_submit" +"""Callback: step 1 of publish channel (sync mode selection) submitted.""" + +CONFIG_PUBLISH_SYNC_MODE = "publish_sync_mode" +"""Input: radio buttons for direct vs group-wide sync mode.""" + +CONFIG_PUBLISH_DIRECT_TARGET = "publish_direct_target" +"""Input: workspace picker for direct (1-to-1) sync target.""" + +CONFIG_UNPUBLISH_CHANNEL = "unpublish_channel" +"""Action: user clicked "Unpublish" on a published channel (prefix-matched with sync_channel_id).""" + +CONFIG_PAUSE_SYNC = "pause_sync" +"""Action: user clicked "Pause Syncing" on an active channel sync (prefix-matched with sync_id).""" + +CONFIG_RESUME_SYNC = "resume_sync" +"""Action: user clicked "Resume Syncing" on a paused channel sync (prefix-matched with sync_id).""" + +CONFIG_STOP_SYNC = 
"stop_sync" +"""Action: user clicked "Stop Syncing" on a channel sync (prefix-matched with sync_id).""" + +CONFIG_STOP_SYNC_CONFIRM = "stop_sync_confirm" +"""View submission: user confirmed stopping a channel sync.""" + +CONFIG_SUBSCRIBE_CHANNEL = "subscribe_channel" +"""Action: user clicked "Start Syncing" on an available channel (prefix-matched with sync_id).""" + +CONFIG_SUBSCRIBE_CHANNEL_SELECT = "subscribe_channel_select" +"""Input: channel picker in the subscribe channel modal.""" + +CONFIG_SUBSCRIBE_CHANNEL_SUBMIT = "subscribe_channel_submit" +"""Callback: subscribe channel modal submitted.""" + +# --------------------------------------------------------------------------- +# Home Tab actions +# --------------------------------------------------------------------------- + +CONFIG_REFRESH_HOME = "refresh_home" +"""Action: user clicked the "Refresh" button on the Home tab.""" + +CONFIG_BACKUP_RESTORE = "backup_restore" +"""Action: user clicked "Backup/Restore" on the Home tab (opens modal).""" + +CONFIG_BACKUP_RESTORE_SUBMIT = "backup_restore_submit" +"""Callback: Backup/Restore modal submitted (restore from backup).""" + +CONFIG_BACKUP_RESTORE_CONFIRM = "backup_restore_confirm" +"""Callback: Confirm restore when HMAC or encryption key mismatch.""" + +CONFIG_BACKUP_DOWNLOAD = "backup_download" +"""Action: user clicked Download backup in Backup/Restore modal.""" + +CONFIG_BACKUP_RESTORE_JSON_INPUT = "backup_restore_json_input" +"""Input: plain text area for restore JSON in Backup/Restore modal.""" + +CONFIG_DATA_MIGRATION = "data_migration" +"""Action: user clicked "Data Migration" in External Connections (opens modal).""" + +CONFIG_DATA_MIGRATION_SUBMIT = "data_migration_submit" +"""Callback: Data Migration modal submitted (import migration file).""" + +CONFIG_DATA_MIGRATION_CONFIRM = "data_migration_confirm" +"""Callback: Confirm import when signature check failed.""" + +CONFIG_DATA_MIGRATION_EXPORT = "data_migration_export" +"""Action: user clicked Export in 
Data Migration modal.""" + +CONFIG_DATA_MIGRATION_JSON_INPUT = "data_migration_json_input" +"""Input: plain text area for migration import JSON.""" + +# --------------------------------------------------------------------------- +# External Connections (federation) actions +# --------------------------------------------------------------------------- + +CONFIG_GENERATE_FEDERATION_CODE = "generate_federation_code" +"""Action: user clicked "Generate Connection Code" on the Home tab.""" + +CONFIG_ENTER_FEDERATION_CODE = "enter_federation_code" +"""Action: user clicked "Enter Connection Code" on the Home tab.""" + +CONFIG_FEDERATION_CODE_SUBMIT = "federation_code_submit" +"""Callback: enter-connection-code modal submitted.""" + +CONFIG_FEDERATION_CODE_INPUT = "federation_code_input" +"""Input: text field for the connection code in the modal.""" + +CONFIG_FEDERATION_LABEL_SUBMIT = "federation_label_submit" +"""Callback: connection label modal submitted (before code generation).""" + +CONFIG_FEDERATION_LABEL_INPUT = "federation_label_input" +"""Input: text field for the connection label in the modal.""" + +CONFIG_REMOVE_FEDERATION_CONNECTION = "remove_federation_connection" +"""Action: user clicked "Remove Connection" on an external connection (prefix-matched).""" diff --git a/syncbot/slack/blocks.py b/syncbot/slack/blocks.py new file mode 100644 index 0000000..9778f33 --- /dev/null +++ b/syncbot/slack/blocks.py @@ -0,0 +1,95 @@ +"""Block Kit shorthand constructors. + +Thin wrappers around :mod:`slack.orm` dataclasses that collapse the most +common 5-10 line patterns into single function calls. Every function +returns an ``orm`` object, so they compose naturally with +:class:`~slack.orm.BlockView` and the existing dataclass API. 
+ +Usage:: + + from slack.blocks import header, divider, context, text, button, actions + + blocks = [ + header("SyncBot Configuration"), + actions(button(":arrows_counterclockwise: Refresh", action=CONFIG_REFRESH_HOME)), + divider(), + context("Only workspace admins can configure SyncBot."), + ] +""" + +from slack import orm + + +def header(label: str) -> orm.HeaderBlock: + """Large bold header text.""" + return orm.HeaderBlock(text=label) + + +def divider() -> orm.DividerBlock: + """Horizontal divider line.""" + return orm.DividerBlock() + + +def context(label: str) -> orm.ContextBlock: + """Mrkdwn context block (small grey text).""" + return orm.ContextBlock(element=orm.ContextElement(initial_value=label)) + + +def text(label: str) -> orm.SectionBlock: + """Mrkdwn section block (body text).""" + return orm.SectionBlock(label=label) + + +# Alias for section-style usage (SectionBlock with label only). +section = text + + +def button( + label: str, + action: str, + *, + value: str | None = None, + style: str | None = None, + confirm: object = None, + url: str | None = None, +) -> orm.ButtonElement: + """Button element for use inside :func:`actions`.""" + return orm.ButtonElement( + label=label, + action=action, + value=value or label, + style=style, + confirm=confirm, + url=url, + ) + + +def actions(*elements: orm.ButtonElement) -> orm.ActionsBlock: + """Actions block containing one or more buttons.""" + return orm.ActionsBlock(elements=list(elements)) + + +def section_with_image( + label: str, + image_url: str | None, + alt_text: str = "icon", +) -> orm.SectionBlock: + """Section block with an optional image accessory. + + If *image_url* is falsy, returns a plain section block. 
+ """ + if image_url: + return orm.SectionBlock( + label=label, + element=orm.ImageAccessoryElement(image_url=image_url, alt_text=alt_text), + ) + return orm.SectionBlock(label=label) + + +def workspace_card( + label: str, + ws_info: dict, + ws_name: str, +) -> orm.SectionBlock: + """Section block showing workspace info with an optional team icon.""" + return section_with_image(label, ws_info.get("icon_url"), ws_name) diff --git a/syncbot/slack/forms.py b/syncbot/slack/forms.py new file mode 100644 index 0000000..3f6d73b --- /dev/null +++ b/syncbot/slack/forms.py @@ -0,0 +1,100 @@ +"""Pre-built Slack Block Kit forms for SyncBot configuration modals. + +Defines reusable form templates that are deep-copied and customised at +runtime before being sent to Slack: + +* :data:`NEW_SYNC_FORM` — Modal for creating a new sync group (channel picker). +* :data:`JOIN_SYNC_FORM` — Modal for joining an existing sync group + (sync selector + channel selector). +* :data:`ENTER_GROUP_CODE_FORM` — Modal for entering a group invite code. +* :data:`PUBLISH_CHANNEL_FORM` — Modal for publishing a channel. +* :data:`SUBSCRIBE_CHANNEL_FORM` — Modal for subscribing to a channel. +""" + +from slack import actions, orm + +NEW_SYNC_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel to Sync", + action=actions.CONFIG_NEW_SYNC_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a channel"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select the channel you want to sync. The sync will be named after the channel. 
" + "If a sync has already been set up in another workspace, use 'Join existing Sync' instead.", + ), + ), + ] +) + +JOIN_SYNC_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Sync Select", + action=actions.CONFIG_JOIN_SYNC_SELECT, + element=orm.StaticSelectElement(placeholder="Select a Sync to join"), + optional=False, + ), + orm.InputBlock( + label="Sync Channel Select", + action=actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a channel to use for this Sync"), + optional=False, + dispatch_action=True, + ), + ] +) + + +ENTER_GROUP_CODE_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Group Invite Code", + action=actions.CONFIG_JOIN_GROUP_CODE, + element=orm.PlainTextInputElement(placeholder="Enter the code (e.g. A7X-K9M)"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Enter the invite code shared by an admin from another workspace in the group.", + ), + ), + ] +) + + +PUBLISH_CHANNEL_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel to Publish", + action=actions.CONFIG_PUBLISH_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a channel to publish"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select a channel from your workspace to make available for syncing.", + ), + ), + ] +) + + +SUBSCRIBE_CHANNEL_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel for Sync", + action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a channel to sync into"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select a channel in your workspace to receive messages from the published channel.", + ), + ), + ] +) diff --git a/syncbot/utils/slack/orm.py b/syncbot/slack/orm.py similarity index 76% rename from syncbot/utils/slack/orm.py rename to syncbot/slack/orm.py 
index 0da7a6a..cfda533 100644 --- a/syncbot/utils/slack/orm.py +++ b/syncbot/slack/orm.py @@ -1,9 +1,11 @@ import json +import logging from dataclasses import dataclass, field -from typing import Any, Dict, List +from typing import Any +from helpers import safe_get -from utils.helpers import safe_get +logger = logging.getLogger(__name__) @dataclass @@ -76,7 +78,9 @@ def get_selected_value(self, input_data, **kwargs): return self.element.get_selected_value(input_data, self.action, **kwargs) def as_form_field(self): - block = {"type": "section", "block_id": self.action, "text": self.make_label_field()} + block = {"type": "section", "text": self.make_label_field()} + if self.action: + block["block_id"] = self.action if self.element: block.update({"accessory": self.element.as_form_field(action=self.action)}) return block @@ -85,6 +89,23 @@ def make_label_field(self, text=None): return {"type": "mrkdwn", "text": text or self.label or ""} +@dataclass +class HeaderBlock(BaseBlock): + """A ``header`` block — renders as large bold text.""" + + text: str = None + + def as_form_field(self): + return { + "type": "header", + "text": { + "type": "plain_text", + "text": self.text or self.label or "", + "emoji": True, + }, + } + + @dataclass class ButtonElement(BaseAction): style: str = None @@ -114,8 +135,8 @@ class SelectorOption: value: str -def as_selector_options(names: List[str], values: List[str] = []) -> List[SelectorOption]: - if values == []: +def as_selector_options(names: list[str], values: list[str] | None = None) -> list[SelectorOption]: + if values is None: selectors = [SelectorOption(name=x, value=x) for x in names] else: selectors = [SelectorOption(name=x, value=y) for x, y in zip(names, values)] @@ -125,7 +146,7 @@ def as_selector_options(names: List[str], values: List[str] = []) -> List[Select @dataclass class StaticSelectElement(BaseElement): initial_value: str = None - options: List[SelectorOption] = None + options: list[SelectorOption] = None # def 
with_options(self, options: List[SelectorOption]): # return SelectorElement(self.label, self.action, options) @@ -159,7 +180,7 @@ def __make_option(self, option: SelectorOption): @dataclass class RadioButtonsElement(BaseElement): initial_value: str = None - options: List[SelectorOption] = None + options: list[SelectorOption] = None def get_selected_value(self, input_data, action): return safe_get(input_data, action, action, "selected_option", "value") @@ -257,6 +278,32 @@ def as_form_field(self, action: str): return j +@dataclass +class ConversationsSelectElement(BaseElement): + """Channel picker that includes both public and private channels.""" + + initial_value: str = None + + def get_selected_value(self, input_data, action): + return safe_get(input_data, action, action, "selected_conversation") + + def as_form_field(self, action: str): + j = { + "type": "conversations_select", + "action_id": action, + "filter": { + "include": ["public", "private"], + "exclude_bot_users": True, + "exclude_external_shared_channels": True, + }, + } + if self.placeholder: + j.update(self.make_placeholder_field()) + if self.initial_value: + j["initial_conversation"] = self.initial_value + return j + + @dataclass class DatepickerElement(BaseElement): initial_value: str = None @@ -316,7 +363,7 @@ def as_form_field(self, action: str): @dataclass class MultiUsersSelectElement(BaseElement): - initial_value: List[str] = None + initial_value: list[str] = None def get_selected_value(self, input_data, action): return safe_get(input_data, action, action, "selected_users") @@ -336,6 +383,7 @@ def as_form_field(self, action: str): @dataclass class ContextBlock(BaseBlock): element: BaseElement = None + elements: list = None initial_value: str = "" def get_selected_value(self, input_data, action): @@ -346,12 +394,45 @@ def get_selected_value(self, input_data, action): def as_form_field(self): j = {"type": "context"} - j.update({"elements": [self.element.as_form_field()]}) + if self.elements: + 
j["elements"] = [e.as_form_field() for e in self.elements] + elif self.element: + j["elements"] = [self.element.as_form_field()] if self.action: j["block_id"] = self.action return j +@dataclass +class ImageContextElement(BaseElement): + """An image element for use inside a ContextBlock.""" + + image_url: str = None + alt_text: str = "icon" + + def as_form_field(self): + return { + "type": "image", + "image_url": self.image_url, + "alt_text": self.alt_text, + } + + +@dataclass +class ImageAccessoryElement(BaseElement): + """An image element for use as a SectionBlock accessory.""" + + image_url: str = None + alt_text: str = "icon" + + def as_form_field(self, action: str = None): + return { + "type": "image", + "image_url": self.image_url, + "alt_text": self.alt_text, + } + + @dataclass class ContextElement(BaseElement): initial_value: str = None @@ -372,7 +453,7 @@ def as_form_field(self): @dataclass class ActionsBlock(BaseBlock): - elements: List[BaseAction] = field(default_factory=list) + elements: list[BaseAction] = field(default_factory=list) def as_form_field(self): j = { @@ -386,7 +467,7 @@ def as_form_field(self): @dataclass class BlockView: - blocks: List[BaseBlock] + blocks: list[BaseBlock] def delete_block(self, action: str): self.blocks = [b for b in self.blocks if b.action != action] @@ -399,12 +480,12 @@ def set_initial_values(self, values: dict): if block.action in values: block.element.initial_value = values[block.action] - def set_options(self, options: Dict[str, List[SelectorOption]]): + def set_options(self, options: dict[str, list[SelectorOption]]): for block in self.blocks: if block.action in options: block.element.options = options[block.action] - def as_form_field(self) -> List[dict]: + def as_form_field(self) -> list[dict]: return [b.as_form_field() for b in self.blocks] def get_selected_values(self, body) -> dict: @@ -454,8 +535,16 @@ def post_modal( elif new_or_add == "add": client.views_push(trigger_id=trigger_id, view=view) except Exception 
as e: - print(e) - print(json.dumps(view, indent=2)) + logger.error(f"Failed to open/push modal view: {e}") + logger.debug(f"View payload: {json.dumps(view, indent=2)}") + + def publish_home_tab(self, client: Any, user_id: str): + """Publish a Home tab view for the given user.""" + blocks = self.as_form_field() + client.views_publish( + user_id=user_id, + view={"type": "home", "blocks": blocks}, + ) def update_modal( self, @@ -474,16 +563,40 @@ def update_modal( "type": "modal", "callback_id": callback_id, "title": {"type": "plain_text", "text": title_text}, - "submit": {"type": "plain_text", "text": submit_button_text}, "close": {"type": "plain_text", "text": close_button_text}, "notify_on_close": notify_on_close, "blocks": blocks, } + if submit_button_text != "None": + view["submit"] = {"type": "plain_text", "text": submit_button_text} if parent_metadata: view["private_metadata"] = json.dumps(parent_metadata) client.views_update(view_id=view_id, view=view) + def as_ack_update( + self, + title_text: str, + callback_id: str, + submit_button_text: str = "Submit", + parent_metadata: dict = None, + close_button_text: str = "Close", + ) -> dict: + """Build a modal view dict suitable for ack(response_action="update").""" + blocks = self.as_form_field() + view: dict = { + "type": "modal", + "callback_id": callback_id, + "title": {"type": "plain_text", "text": title_text}, + "close": {"type": "plain_text", "text": close_button_text}, + "blocks": blocks, + } + if submit_button_text != "None": + view["submit"] = {"type": "plain_text", "text": submit_button_text} + if parent_metadata: + view["private_metadata"] = json.dumps(parent_metadata) + return view + @dataclass class ImageBlock(BaseBlock): diff --git a/syncbot/utils/announcements.py b/syncbot/utils/announcements.py deleted file mode 100644 index b38c40a..0000000 --- a/syncbot/utils/announcements.py +++ /dev/null @@ -1,44 +0,0 @@ -import time -from logging import Logger -from typing import List - -from slack_sdk.web 
import WebClient -from utils.db import DbManager -from utils.db.schemas import Region, SyncChannel - -# msg = ":rotating_light: Hey, {region}! This is Moneyball, coming at you with some new features for Syncbot! :rotating_light:\n\n" -# msg += ":camera_with_flash: *Photo Sync*: photos will now be synced when you post them to linked channels. Videos are not supported at this time. Also, animated GIFs will be synced, but they will show up as still images.\n\n" -# msg += ":speech_balloon: *@ mention tagging*: you can now @ mention users in your synced posts, and Syncbot will do its best to translate them to the appropriate user in the target workspace. Linked users must be in both workspaces for this to work, otherwise it will default to a non-tagged representation of a mention.\n\n" -# msg += "~ :moneybag: :baseball:" - - -def send( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -): - if body.get("text")[:7] == "confirm": - msg = body.get("text")[8:] - region_records: List[Region] = DbManager.find_records(Region, filters=[True]) - for region in region_records: - sync_channels: List[SyncChannel] = DbManager.find_records( - SyncChannel, filters=[SyncChannel.region_id == region.id] - ) - client = WebClient(token=region.bot_token) - for channel in sync_channels: - try: - client.chat_postMessage(channel=channel.channel_id, text=msg.format(region=region.workspace_name)) - print("Message sent!") - except Exception as e: - if e.response.get("error") == "ratelimited": - print("Rate limited, waiting 10 seconds") - time.sleep(10) - try: - client.chat_postMessage( - channel=channel.channel_id, text=msg.format(region=region.workspace_name) - ) - print("Message sent!") - except Exception as e: - print(f"Error sending message to {region.workspace_name}: {e}") - print(f"Error sending message to {region.workspace_name}: {e}") diff --git a/syncbot/utils/builders.py b/syncbot/utils/builders.py deleted file mode 100644 index c4b82fa..0000000 --- 
a/syncbot/utils/builders.py +++ /dev/null @@ -1,137 +0,0 @@ -import copy -from slack_sdk.web import WebClient -from logging import Logger -from utils import helpers -from utils.slack import forms, orm, actions -from utils.db.schemas import Region, SyncChannel, Sync -from utils.db import DbManager -from utils.helpers import safe_get - - -def build_config_form( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> orm.BlockView: - """Builds a BlockView config form for the given region. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - - """ - - team_id: str = safe_get(body, "team_id") or safe_get(body, "view", "team_id") - trigger_id: str = safe_get(body, "trigger_id") - root_view_id: str = safe_get(body, "view", "root_view_id") - error_message: str = safe_get(body, "error_message") - region_record: Region = helpers.get_region_record(team_id, body, context, client) - - config_form = copy.deepcopy(forms.CONFIG_FORM) - - # pull all Syncs, SyncChannels for this region - records = DbManager.find_join_records2( - left_cls=SyncChannel, - right_cls=Sync, - filters=[SyncChannel.region_id == region_record.id], - ) - - for record in records: - sync_channel: SyncChannel = record[0] - sync: Sync = record[1] - config_form.blocks.extend( - forms.build_config_form_sync_block( - sync_channel=sync_channel, - sync=sync, - ) - ) - - if error_message: - config_form.blocks.insert( - 0, - orm.SectionBlock( - text=orm.MrkdwnText(error_message), - ), - ) - - if root_view_id: - config_form.update_modal( - client=client, - view_id=root_view_id, - callback_id=actions.CONFIG_FORM_SUBMIT, - title_text="SyncBot Configuration", - ) - else: - config_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_FORM_SUBMIT, - title_text="SyncBot Configuration", - ) - - -def build_join_sync_form( - body: dict, - client: WebClient, - 
logger: Logger, - context: dict, -) -> None: - """Pushes a new modal layer to join a new sync. - - Args: - body (dict): Event body from the action invocation. - client (WebClient): The Slack WebClient object. - logger (Logger): A logger object. - context (dict): A context object. - """ - trigger_id: str = safe_get(body, "trigger_id") - team_id = safe_get(body, "view", "team_id") - join_sync_form: orm.BlockView = copy.deepcopy(forms.JOIN_SYNC_FORM) - - sync_records: list[Sync] = DbManager.find_records(Sync, [True]) - channel_sync_region_records: list[tuple[SyncChannel, Region]] = DbManager.find_join_records2( - left_cls=SyncChannel, - right_cls=Region, - filters=[Region.team_id == team_id], - ) - sync_records = [ - sync for sync in sync_records if sync.id not in [record[0].sync_id for record in channel_sync_region_records] - ] - - options = orm.as_selector_options([sync.title for sync in sync_records], [str(sync.id) for sync in sync_records]) - join_sync_form.set_options({actions.CONFIG_JOIN_SYNC_SELECT: options}) - join_sync_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - title_text="Join Sync", - new_or_add="add", - ) - - -def build_new_sync_form( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Pushes a new modal layer to create a new sync. - - Args: - body (dict): Event body from the action invocation. - client (WebClient): The Slack WebClient object. - logger (Logger): A logger object. - context (dict): A context object. 
- """ - trigger_id: str = safe_get(body, "trigger_id") - new_sync_form: orm.BlockView = copy.deepcopy(forms.NEW_SYNC_FORM) - new_sync_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_NEW_SYNC_SUBMIT, - title_text="New Sync", - new_or_add="add", - ) diff --git a/syncbot/utils/constants.py b/syncbot/utils/constants.py deleted file mode 100644 index a24f96b..0000000 --- a/syncbot/utils/constants.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - -SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN" -SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME" -SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME" -SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" -SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" -SLACK_SCOPES = "ENV_SLACK_SCOPES" -PASSWORD_ENCRYPT_KEY = "PASSWORD_ENCRYPT_KEY" - -DATABASE_HOST = "DATABASE_HOST" -ADMIN_DATABASE_USER = "ADMIN_DATABASE_USER" -ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" -ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" - -LOCAL_DEVELOPMENT = os.environ.get(SLACK_BOT_TOKEN, "123") != "123" - -SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME" -SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME" -SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" -SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" -SLACK_SCOPES = "ENV_SLACK_SCOPES" -SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET" - -WARNING_BLOCK = "WARNING_BLOCK" - -MAX_HEIF_SIZE = 1000 - -AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID" -AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY" -S3_IMAGE_BUCKET = "syncbot-images" -S3_IMAGE_URL = f"https://{S3_IMAGE_BUCKET}.s3.amazonaws.com/" diff --git a/syncbot/utils/db/__init__.py b/syncbot/utils/db/__init__.py deleted file mode 100644 index 02a1098..0000000 --- a/syncbot/utils/db/__init__.py +++ /dev/null @@ -1,163 +0,0 @@ -from dataclasses import dataclass -from typing import Tuple, TypeVar, List -import os -from sqlalchemy import create_engine, pool, and_ -from sqlalchemy.orm 
import sessionmaker -from utils.db.schemas import BaseClass -from utils import constants - - -@dataclass -class DatabaseField: - name: str - value: object = None - - -GLOBAL_ENGINE = None -GLOBAL_SESSION = None -GLOBAL_SCHEMA = None - - -def get_session(echo=False, schema=None): - if GLOBAL_SESSION: - return GLOBAL_SESSION - - global GLOBAL_ENGINE, GLOBAL_SCHEMA - if schema != GLOBAL_SCHEMA or not GLOBAL_ENGINE: - host = os.environ[constants.DATABASE_HOST] - user = os.environ[constants.ADMIN_DATABASE_USER] - passwd = os.environ[constants.ADMIN_DATABASE_PASSWORD] - database = schema or os.environ[constants.ADMIN_DATABASE_SCHEMA] - - db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306/{database}?charset=utf8mb4" - GLOBAL_ENGINE = create_engine(db_url, echo=echo, poolclass=pool.NullPool, convert_unicode=True) - GLOBAL_SCHEMA = database - return sessionmaker()(bind=GLOBAL_ENGINE) - - -def close_session(session): - global GLOBAL_SESSION, GLOBAL_ENGINE - if GLOBAL_SESSION == session: - if GLOBAL_ENGINE: - GLOBAL_ENGINE.close() - GLOBAL_SESSION = None - - -T = TypeVar("T") - - -class DbManager: - def get_record(cls: T, id, schema=None) -> T: - session = get_session(schema=schema) - try: - x = session.query(cls).filter(cls.get_id() == id).first() - if x: - session.expunge(x) - return x - finally: - session.rollback() - close_session(session) - - def find_records(cls: T, filters, schema=None) -> List[T]: - session = get_session(schema=schema) - try: - records = session.query(cls).filter(and_(*filters)).all() - for r in records: - session.expunge(r) - return records - finally: - session.rollback() - close_session(session) - - def find_join_records2(left_cls: T, right_cls: T, filters, schema=None) -> List[Tuple[T]]: - session = get_session(schema=schema) - try: - records = session.query(left_cls, right_cls).join(right_cls).filter(and_(*filters)).all() - session.expunge_all() - return records - finally: - session.rollback() - close_session(session) - - def 
find_join_records3( - left_cls: T, right_cls1: T, right_cls2: T, filters, schema=None, left_join=False - ) -> List[Tuple[T]]: - session = get_session(schema=schema) - try: - records = ( - session.query(left_cls, right_cls1, right_cls2) - .select_from(left_cls) - .join(right_cls1, isouter=left_join) - .join(right_cls2, isouter=left_join) - .filter(and_(*filters)) - .all() - ) - session.expunge_all() - return records - finally: - session.rollback() - close_session(session) - - def update_record(cls: T, id, fields, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(cls.get_id() == id).update(fields, synchronize_session="fetch") - session.flush() - finally: - session.commit() - close_session(session) - - def update_records(cls: T, filters, fields, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(and_(*filters)).update(fields, synchronize_session="fetch") - session.flush() - finally: - session.commit() - close_session(session) - - def create_record(record: BaseClass, schema=None) -> BaseClass: - session = get_session(schema=schema) - try: - session.add(record) - session.flush() - session.expunge(record) - finally: - session.commit() - close_session(session) - return record - - def create_records(records: List[BaseClass], schema=None): - session = get_session(schema=schema) - try: - session.add_all(records) - session.flush() - finally: - session.commit() - close_session(session) - - def delete_record(cls: T, id, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(cls.get_id() == id).delete() - session.flush() - finally: - session.commit() - close_session(session) - - def delete_records(cls: T, filters, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(and_(*filters)).delete() - session.flush() - finally: - session.commit() - close_session(session) - - def execute_sql_query(sql_query, schema=None): - session = 
get_session(schema=schema) - try: - records = session.execute(sql_query) - return records - finally: - close_session(session) diff --git a/syncbot/utils/db/schemas.py b/syncbot/utils/db/schemas.py deleted file mode 100644 index b02984a..0000000 --- a/syncbot/utils/db/schemas.py +++ /dev/null @@ -1,80 +0,0 @@ -import sqlalchemy -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, ForeignKey, Integer, String -from sqlalchemy.types import DECIMAL -from sqlalchemy.orm import relationship - -BaseClass = declarative_base(mapper=sqlalchemy.orm.mapper) - - -class GetDBClass: - def get_id(self): - return self.id - - def get(self, attr): - if attr in [c.key for c in self.__table__.columns]: - return getattr(self, attr) - return None - - def to_json(self): - return {c.key: self.get(c.key) for c in self.__table__.columns} - - def __repr__(self): - return str(self.to_json()) - - -class Region(BaseClass, GetDBClass): - __tablename__ = "regions" - id = Column(Integer, primary_key=True) - team_id = Column(String(100), unique=True) - workspace_name = Column(String(100)) - bot_token = Column(String(100)) - - def get_id(): - return Region.team_id - - -class Sync(BaseClass, GetDBClass): - __tablename__ = "syncs" - id = Column(Integer, primary_key=True) - title = Column(String(100), unique=True) - description = Column(String(100)) - - def get_id(): - return Sync.id - - -class SyncChannel(BaseClass, GetDBClass): - __tablename__ = "sync_channels" - id = Column(Integer, primary_key=True) - sync_id = Column(Integer, ForeignKey("syncs.id")) - region_id = Column(Integer, ForeignKey("regions.id")) - region = relationship("Region", backref="sync_channels") - channel_id = Column(String(100)) - - def get_id(): - return SyncChannel.channel_id - - -class PostMeta(BaseClass, GetDBClass): - __tablename__ = "post_meta" - id = Column(Integer, primary_key=True) - post_id = Column(String(100)) - sync_channel_id = Column(Integer, ForeignKey("sync_channels.id")) - ts = 
Column(DECIMAL(16, 6)) - - def get_id(): - return PostMeta.post_id - - -# class SyncChannelExtended(BaseClass, GetDBClass): -# __tablename__ = "sync_channels_extended" -# id = Column(Integer, primary_key=True) -# sync_id = Column(Integer) -# region_id = Column(Integer) -# channel_id = Column(String(100)) -# sync_title = Column(String(100)) -# sync_description = Column(String(100)) -# region_team_id = Column(String(100)) -# region_workspace_name = Column(String(100)) -# region_bot_token = Column(String(100)) diff --git a/syncbot/utils/handlers.py b/syncbot/utils/handlers.py deleted file mode 100644 index cc365c3..0000000 --- a/syncbot/utils/handlers.py +++ /dev/null @@ -1,342 +0,0 @@ -import os - -# import time -import uuid -from logging import Logger - -from slack_sdk.web import WebClient -from utils import builders, constants, helpers -from utils.db import DbManager, schemas -from utils.slack import actions, forms, orm - - -def handle_remove_sync( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -): - """Handles the "DeSync" button action by removing the SyncChannel record from the database. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - sync_channel_id = int(helpers.safe_get(body, "actions", 0, "value")) - sync_channel_record = DbManager.get_record(schemas.SyncChannel, id=sync_channel_id) - DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == sync_channel_id]) - try: - client.conversations_leave(channel=sync_channel_record.channel_id) - except Exception: - pass - builders.build_config_form(body, client, logger, context) - - -def respond_to_message_event( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Main function for handling message events. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. 
- logger (Logger): Logger object. - context (dict): Context object. - """ - event_type = helpers.safe_get(body, "event", "type") - event_subtype = helpers.safe_get(body, "event", "subtype") - message_subtype = helpers.safe_get(body, "event", "message", "subtype") or helpers.safe_get( - body, "event", "previous_message", "subtype" - ) - team_id = helpers.safe_get(body, "team_id") - channel_id = helpers.safe_get(body, "event", "channel") - msg_text = helpers.safe_get(body, "event", "text") or helpers.safe_get(body, "event", "message", "text") - msg_text = " " if (msg_text or "") == "" else msg_text - mentioned_users = helpers.parse_mentioned_users(msg_text, client) - user_id = helpers.safe_get(body, "event", "user") or helpers.safe_get(body, "event", "message", "user") - thread_ts = helpers.safe_get(body, "event", "thread_ts") - ts = ( - helpers.safe_get(body, "event", "message", "ts") - or helpers.safe_get(body, "event", "previous_message", "ts") - or helpers.safe_get(body, "event", "ts") - ) - files = [ - file - for file in helpers.safe_get(body, "event", "files") - or helpers.safe_get(body, "event", "message", "files") - or [] - ] - photos = [photo for photo in files if helpers.safe_get(photo, "original_w")] - if event_subtype in ["message_changed", "message_deleted"]: - photo_names = [ - f"{photo['id']}.png" if photo['filetype'] == "heic" else f"{photo['id']}.{photo['filetype']}" - for photo in photos - ] - photo_list = [{"url": f"{constants.S3_IMAGE_URL}{name}", "name": name} for name in photo_names] - else: - photo_list = helpers.upload_photos(files=photos, client=client, logger=logger) - photo_blocks = [ - orm.ImageBlock(image_url=photo["url"], alt_text=photo["name"]).as_form_field() for photo in photo_list - ] - - if (event_type == "message") and (message_subtype != "bot_message"): # and (event_context not in EVENT_LIST): - # EVENT_LIST.append(event_context) - if (not event_subtype) or (event_subtype == "file_share" and msg_text != ""): - post_list = [] - 
post_uuid = uuid.uuid4().bytes - if not thread_ts: - # handle new post - sync_records = helpers.get_sync_list(team_id, channel_id) - if not sync_records: - try: - client.chat_postMessage( - channel=channel_id, - text=":wave: Hello! I'm SyncBot. I was added to this channel, but this channel doesn't seem to be part of a Sync. Please use the `/config-syncbot` command to configure me.", - ) - client.conversations_leave(channel=channel_id) - except Exception as e: - logger.error(e) - return - user_name, user_profile_url = helpers.get_user_info(client, user_id) - region_name = helpers.safe_get( - [record[1].workspace_name for record in sync_records if record[0].channel_id == channel_id], 0 - ) - for record in sync_records: - sync_channel, region = record - if sync_channel.channel_id == channel_id: - ts = helpers.safe_get(body, "event", "ts") - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - user_name=user_name, - user_profile_url=user_profile_url, - region_name=region_name, - blocks=photo_blocks, - ) - # if photos != []: - # time.sleep(3) # required so the next step catches the latest ts - # posts = client.conversations_history(channel=sync_channel.channel_id, limit=1) - # print(posts["messages"][0]["ts"]) - # # ts = posts["messages"][0]["ts"] - # ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - # else: - # ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - post_list.append( - schemas.PostMeta( - post_id=post_uuid, - sync_channel_id=sync_channel.id, - ts=float(ts), - ) - ) - for photo in photo_list: - os.remove(photo["path"]) - DbManager.create_records(post_list) - else: - # handle threaded reply - post_list = [] - post_uuid = uuid.uuid4().bytes - post_records = 
helpers.get_post_records(thread_ts) - region_name = helpers.safe_get( - [record[2].workspace_name for record in post_records if record[1].channel_id == channel_id], 0 - ) - for record in post_records: - post_meta, sync_channel, region = record - user_name, user_profile_url = helpers.get_user_info(client, user_id) - if sync_channel.channel_id == channel_id: - ts = helpers.safe_get(body, "event", "ts") - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - user_name=user_name, - user_profile_url=user_profile_url, - thread_ts="{:.6f}".format(post_meta.ts), - region_name=region_name, - blocks=photo_blocks, - ) - ts = helpers.safe_get(res, "ts") - post_list.append( - schemas.PostMeta( - post_id=post_uuid, - sync_channel_id=sync_channel.id, - ts=float(ts), - ) - ) - DbManager.create_records(post_list) - - elif event_subtype == "message_changed": - # handle edited message - post_records = helpers.get_post_records(ts) - region_name = helpers.safe_get( - [record[2].workspace_name for record in post_records if record[1].channel_id == channel_id], 0 - ) - for record in post_records: - post_meta, sync_channel, region = record - if sync_channel.channel_id == channel_id: - continue - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - update_ts="{:.6f}".format(post_meta.ts), - region_name=region_name, - blocks=photo_blocks, - ) - elif event_subtype == "message_deleted": - # handle deleted message - post_records = helpers.get_post_records(ts) - for record in post_records: - post_meta, sync_channel, region = record - if sync_channel.channel_id == channel_id: - continue - else: - res = helpers.delete_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - 
ts="{:.6f}".format(post_meta.ts), - ) - - -def handle_config_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the config form submission (currently does nothing) - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - pass - - -def handle_join_sync_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the join sync form submission by appending to the SyncChannel table. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) - sync_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_SELECT) - channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) - team_id = helpers.safe_get(body, "view", "team_id") - region_record: schemas.Region = DbManager.get_record(schemas.Region, id=team_id) - sync_record: schemas.Sync = DbManager.get_record(schemas.Sync, id=sync_id) - - channel_sync_record = schemas.SyncChannel( - sync_id=sync_id, - channel_id=channel_id, - region_id=region_record.id, - ) - try: - DbManager.create_record(channel_sync_record) - client.conversations_join(channel=channel_id) - client.chat_postMessage( - channel=channel_id, - text=f":wave: Hello! I'm SyncBot. I'll be keeping this channel in sync with *{sync_record.title}*.", - ) - except Exception: - body["error_message"] = "Your chosen channel is already part of a Sync. Please choose another channel." - - builders.build_config_form(body, client, logger, context) - - -def handle_new_sync_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the new sync form submission by appending to the Sync table. 
- - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - form_data = forms.NEW_SYNC_FORM.get_selected_values(body) - sync_title = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_TITLE) - sync_description = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_DESCRIPTION) - - sync_record = schemas.Sync( - title=sync_title, - description=sync_description, - ) - DbManager.create_record(sync_record) - - -def check_join_sync_channel( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Checks to see if the chosen channel id is already part of a sync - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - view_id = helpers.safe_get(body, "view", "id") - form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) - channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) - blocks = helpers.safe_get(body, "view", "blocks") - already_warning = constants.WARNING_BLOCK in [block["block_id"] for block in blocks] - sync_channel_records = DbManager.find_records(schemas.SyncChannel, [schemas.SyncChannel.channel_id == channel_id]) - - if len(sync_channel_records) > 0 and not already_warning: - block = orm.SectionBlock( - action=constants.WARNING_BLOCK, - label=":warning: :warning: This channel is already part of a Sync! Please choose another channel.", - ).as_form_field() - print(block) - blocks.append( - orm.SectionBlock( - action=constants.WARNING_BLOCK, - label=":warning: :warning: This channel is already part of a Sync! 
Please choose another channel.", - ).as_form_field() - ) - helpers.update_modal( - blocks=blocks, - client=client, - view_id=view_id, - title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - ) - elif len(sync_channel_records) == 0 and already_warning: - blocks = [block for block in blocks if block["block_id"] != constants.WARNING_BLOCK] - helpers.update_modal( - blocks=blocks, - client=client, - view_id=view_id, - title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - ) diff --git a/syncbot/utils/helpers.py b/syncbot/utils/helpers.py deleted file mode 100644 index 1e3697c..0000000 --- a/syncbot/utils/helpers.py +++ /dev/null @@ -1,313 +0,0 @@ -import json -import os -import re -from logging import Logger -from typing import Dict, List, Tuple - -import boto3 -import requests -import slack_sdk -from PIL import Image -from pillow_heif import register_heif_opener -from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow -from slack_bolt.oauth.oauth_settings import OAuthSettings -from slack_sdk import WebClient -from utils import constants -from utils.db import DbManager, schemas -from utils.slack import actions - -register_heif_opener() - - -def get_oauth_flow(): - if constants.LOCAL_DEVELOPMENT: - return None - else: - return LambdaS3OAuthFlow( - oauth_state_bucket_name=os.environ[constants.SLACK_STATE_S3_BUCKET_NAME], - installation_bucket_name=os.environ[constants.SLACK_INSTALLATION_S3_BUCKET_NAME], - settings=OAuthSettings( - client_id=os.environ[constants.SLACK_CLIENT_ID], - client_secret=os.environ[constants.SLACK_CLIENT_SECRET], - scopes=os.environ[constants.SLACK_SCOPES].split(","), - ), - ) - - -def safe_get(data, *keys): - if not data: - return None - try: - result = data - for k in keys: - if isinstance(k, int) and isinstance(result, list): - result = result[k] - elif result.get(k): - result = result[k] - else: - return None - return result - except KeyError: - return None - - -def 
get_sync_list(team_id: str, channel_id: str) -> List[Tuple[schemas.SyncChannel, schemas.Region]]: - sync_channel_record = DbManager.find_records(schemas.SyncChannel, [schemas.SyncChannel.channel_id == channel_id]) - if sync_channel_record: - sync_channels = DbManager.find_join_records2( - left_cls=schemas.SyncChannel, - right_cls=schemas.Region, - filters=[schemas.SyncChannel.sync_id == sync_channel_record[0].sync_id], - ) - else: - sync_channels = [] - return sync_channels - - -def get_user_info(client: WebClient, user_id: str) -> Tuple[str, str]: - try: - res = client.users_info(user=user_id) - except slack_sdk.errors.SlackApiError: - return None, None - - user_name = ( - safe_get(res, "user", "profile", "display_name") or safe_get(res, "user", "profile", "real_name") or None - ) - user_profile_url = safe_get(res, "user", "profile", "image_192") - return user_name, user_profile_url - - -def post_message( - bot_token: str, - channel_id: str, - msg_text: str, - user_name: str = None, - user_profile_url: str = None, - thread_ts: str = None, - update_ts: str = None, - region_name: str = None, - blocks: List[dict] = None, -) -> Dict: - slack_client = WebClient(bot_token) - posted_from = f"({region_name})" if region_name else "(via SyncBot)" - if blocks: - # msg_block = orm.SectionBlock(label=msg_text).as_form_field() - msg_block = {"type": "section", "text": {"type": "mrkdwn", "text": msg_text}} - all_blocks = [msg_block] + blocks - else: - all_blocks = [] - if update_ts: - res = slack_client.chat_update( - channel=channel_id, - text=msg_text, - ts=update_ts, - blocks=all_blocks, - ) - else: - res = slack_client.chat_postMessage( - channel=channel_id, - text=msg_text, - username=f"{user_name} {posted_from}", - icon_url=user_profile_url, - thread_ts=thread_ts, - blocks=all_blocks, - ) - return res - - -def get_post_records(thread_ts: str) -> List[Tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Region]]: - post = DbManager.find_records(schemas.PostMeta, 
[schemas.PostMeta.ts == float(thread_ts)]) - if post: - post_records = DbManager.find_join_records3( - left_cls=schemas.PostMeta, - right_cls1=schemas.SyncChannel, - right_cls2=schemas.Region, - filters=[schemas.PostMeta.post_id == post[0].post_id], - ) - else: - post_records = [] - return post_records - - -def delete_message(bot_token: str, channel_id: str, ts: str) -> Dict: - slack_client = WebClient(bot_token) - res = slack_client.chat_delete( - channel=channel_id, - ts=ts, - ) - return res - - -def get_request_type(body: dict) -> tuple[str]: - request_type = safe_get(body, "type") - if request_type == "event_callback": - return ("event_callback", safe_get(body, "event", "type")) - elif request_type == "block_actions": - block_action = safe_get(body, "actions", 0, "action_id") - if block_action[: len(actions.CONFIG_REMOVE_SYNC)] == actions.CONFIG_REMOVE_SYNC: - block_action = actions.CONFIG_REMOVE_SYNC - return ("block_actions", block_action) - elif request_type == "view_submission": - return ("view_submission", safe_get(body, "view", "callback_id")) - elif not request_type and "command" in body: - return ("command", safe_get(body, "command")) - else: - return ("unknown", "unknown") - - -def get_region_record(team_id: str, body: dict, context: dict, client: WebClient) -> schemas.Region: - region_record: schemas.Region = DbManager.get_record(schemas.Region, id=team_id) - team_domain = safe_get(body, "team", "domain") - - if not region_record: - try: - team_info = client.team_info() - team_name = team_info["team"]["name"] - except Exception: - team_name = team_domain - region_record: schemas.Region = DbManager.create_record( - schemas.Region( - team_id=team_id, - workspace_name=team_name, - bot_token=context["bot_token"], - ) - ) - - return region_record - - -def update_modal( - blocks: List[dict], - client: WebClient, - view_id: str, - title_text: str, - callback_id: str, - submit_button_text: str = "Submit", - parent_metadata: dict = None, - close_button_text: 
str = "Close", - notify_on_close: bool = False, -): - view = { - "type": "modal", - "callback_id": callback_id, - "title": {"type": "plain_text", "text": title_text}, - "submit": {"type": "plain_text", "text": submit_button_text}, - "close": {"type": "plain_text", "text": close_button_text}, - "notify_on_close": notify_on_close, - "blocks": blocks, - } - if parent_metadata: - view["private_metadata"] = json.dumps(parent_metadata) - - client.views_update(view_id=view_id, view=view) - - -def upload_photos(files: List[dict], client: WebClient, logger: Logger) -> List[dict]: - uploaded_photos = [] - photos = [file for file in files if file["mimetype"][:5] == "image"] - for photo in photos: - try: - # Download photo - # Try to get a medium size photo first, then fallback to smaller sizes - r = requests.get( - photo.get("thumb_480") or photo.get("thumb_360") or photo.get("thumb_80") or photo.get("url_private"), - headers={"Authorization": f"Bearer {client.token}"}, - ) - r.raise_for_status() - - file_name = f"{photo['id']}.{photo['filetype']}" - file_path = f"/tmp/{file_name}" - file_mimetype = photo["mimetype"] - - # Save photo to disk - with open(file_path, "wb") as f: - f.write(r.content) - - # Convert HEIC to PNG - if photo["filetype"] == "heic": - heic_img = Image.open(file_path) - x, y = heic_img.size - coeff = min(constants.MAX_HEIF_SIZE / max(x, y), 1) - heic_img = heic_img.resize((int(x * coeff), int(y * coeff))) - heic_img.save(file_path.replace(".heic", ".png"), quality=95, optimize=True, format="PNG") - os.remove(file_path) - - file_path = file_path.replace(".heic", ".png") - file_name = file_name.replace(".heic", ".png") - file_mimetype = "image/png" - - # Upload photo to S3 - if constants.LOCAL_DEVELOPMENT: - s3_client = boto3.client( - "s3", - aws_access_key_id=os.environ[constants.AWS_ACCESS_KEY_ID], - aws_secret_access_key=os.environ[constants.AWS_SECRET_ACCESS_KEY], - ) - else: - s3_client = boto3.client("s3") - - with open(file_path, "rb") as f: - 
s3_client.upload_fileobj( - f, constants.S3_IMAGE_BUCKET, file_name, ExtraArgs={"ContentType": file_mimetype} - ) - uploaded_photos.append( - { - "url": f"{constants.S3_IMAGE_URL}{file_name}", - "name": file_name, - "path": file_path, - } - ) - except Exception as e: - logger.error(f"Error uploading file: {e}") - return uploaded_photos - - -def parse_mentioned_users(msg_text: str, client: WebClient) -> List[Dict]: - - user_ids = re.findall(r"<@(\w+)>", msg_text or "") - - if user_ids != []: - try: - members = client.users_list()["members"] - except slack_sdk.errors.SlackApiError: - # TODO: rate limited, use client.user_info() to get individual user info - members = [] - member_dict = {} - for member in members: - user_name = ( - member["profile"]["real_name"] - if member["profile"]["display_name"] != "" - else member["profile"]["display_name"] - ) - member_dict.update({member["id"]: {"user_name": user_name, "email": safe_get(member, "profile", "email")}}) - - return [member_dict[user_id] for user_id in user_ids] - - -def apply_mentioned_users(msg_text: str, client: WebClient, mentioned_user_info: List[Dict]) -> List[Dict]: - - email_list = [user["email"] for user in mentioned_user_info] - msg_text = msg_text or "" - - if email_list == []: - return msg_text - else: - try: - members = client.users_list()["members"] - except slack_sdk.errors.SlackApiError: - # TODO: rate limited, use client.user_info() to get individual user info - members = [] - member_dict = { - member["profile"].get("email"): member["id"] for member in members if member["profile"].get("email") - } - - replace_list = [] - for index, email in enumerate(email_list): - user_id = member_dict.get(email) - if user_id: - replace_list.append(f"<@{user_id}>") - else: - replace_list.append(f"@{mentioned_user_info[index]['user_name']}") - - pattern = r"<@\w+>" - return re.sub(pattern, "{}", msg_text).format(*replace_list) diff --git a/syncbot/utils/routing.py b/syncbot/utils/routing.py deleted file mode 100644 
index 5ee474c..0000000 --- a/syncbot/utils/routing.py +++ /dev/null @@ -1,31 +0,0 @@ -from utils import announcements, builders, handlers -from utils.slack import actions - -COMMAND_MAPPER = { - "/config-syncbot": builders.build_config_form, - "/send-syncbot-announcement": announcements.send, -} - -ACTION_MAPPER = { - actions.CONFIG_JOIN_EXISTING_SYNC: builders.build_join_sync_form, - actions.CONFIG_CREATE_NEW_SYNC: builders.build_new_sync_form, - actions.CONFIG_REMOVE_SYNC: handlers.handle_remove_sync, - actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: handlers.check_join_sync_channel, -} - -EVENT_MAPPER = { - "message": handlers.respond_to_message_event, -} - -VIEW_MAPPER = { - actions.CONFIG_FORM_SUBMIT: handlers.handle_config_submission, - actions.CONFIG_JOIN_SYNC_SUMBIT: handlers.handle_join_sync_submission, - actions.CONFIG_NEW_SYNC_SUBMIT: handlers.handle_new_sync_submission, -} - -MAIN_MAPPER = { - "command": COMMAND_MAPPER, - "block_actions": ACTION_MAPPER, - "event_callback": EVENT_MAPPER, - "view_submission": VIEW_MAPPER, -} diff --git a/syncbot/utils/slack/__init__.py b/syncbot/utils/slack/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/syncbot/utils/slack/actions.py b/syncbot/utils/slack/actions.py deleted file mode 100644 index 1099ac7..0000000 --- a/syncbot/utils/slack/actions.py +++ /dev/null @@ -1,12 +0,0 @@ -CONFIG_JOIN_EXISTING_SYNC = "join_existing_sync" -CONFIG_CREATE_NEW_SYNC = "create_new_sync" -CONFIG_REMOVE_SYNC = "remove_sync" - -CONFIG_FORM_SUBMIT = "config_form_submit" - -CONFIG_NEW_SYNC_TITLE = "config_new_sync_title" -CONFIG_NEW_SYNC_DESCRIPTION = "config_new_sync_description" -CONFIG_NEW_SYNC_SUBMIT = "config_new_sync_submit" -CONFIG_JOIN_SYNC_SELECT = "config_join_sync_select" -CONFIG_JOIN_SYNC_CHANNEL_SELECT = "config_join_sync_channel_select" -CONFIG_JOIN_SYNC_SUMBIT = "config_join_sync_submit" diff --git a/syncbot/utils/slack/forms.py b/syncbot/utils/slack/forms.py deleted file mode 100644 index c810376..0000000 
--- a/syncbot/utils/slack/forms.py +++ /dev/null @@ -1,86 +0,0 @@ -from typing import List -from utils.db import schemas -from utils.slack import orm, actions - -CONFIG_FORM = orm.BlockView( - blocks=[ - orm.ActionsBlock( - elements=[ - orm.ButtonElement( - label="Join existing Sync", - action=actions.CONFIG_JOIN_EXISTING_SYNC, - ), - orm.ButtonElement( - label="Create new Sync", - action=actions.CONFIG_CREATE_NEW_SYNC, - ), - ] - ), - orm.DividerBlock(), - ] -) - -NEW_SYNC_FORM = orm.BlockView( - blocks=[ - orm.InputBlock( - label="Sync Title", - action=actions.CONFIG_NEW_SYNC_TITLE, - element=orm.PlainTextInputElement(placeholder="Enter a title for this Sync"), - optional=False, - ), - orm.InputBlock( - label="Sync Description", - action=actions.CONFIG_NEW_SYNC_DESCRIPTION, - element=orm.PlainTextInputElement(placeholder="Enter a description for this Sync"), - optional=False, - ), - orm.ContextBlock( - element=orm.ContextElement( - initial_value="Reminder: this form is for creating NEW Syncs. If the Sync has already been set up " - "in another region, please use the 'Join existing Sync' button to join it.", - ), - ), - ] -) - -JOIN_SYNC_FORM = orm.BlockView( - blocks=[ - orm.InputBlock( - label="Sync Select", - action=actions.CONFIG_JOIN_SYNC_SELECT, - element=orm.StaticSelectElement(placeholder="Select a Sync to join"), - optional=False, - ), - orm.InputBlock( - label="Sync Channel Select", - action=actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT, - element=orm.ChannelsSelectElement(placeholder="Select a channel to use for this Sync"), - optional=False, - dispatch_action=True, - ), - ] -) - - -def build_config_form_sync_block(sync_channel: schemas.SyncChannel, sync: schemas.Sync) -> List[orm.BaseBlock]: - """Function to build a block for a sync channel. - - Args: - sync_channel (orm.SyncChannel): SyncChannel database record. - sync (orm.Sync): Sync database record. - - Returns: - List[orm.BaseBlock]: List of blocks to be appended to the config form. 
- """ - return [ - orm.SectionBlock( - label=f"*{sync.title}*\n{sync.description}\nChannel: <#{sync_channel.channel_id}>", - action=f"{actions.CONFIG_REMOVE_SYNC}_{sync_channel.id}", - element=orm.ButtonElement( - label="DeSync", - style="danger", - value=f"{sync_channel.id}", # TODO: add confirmation block - ), - ), - orm.DividerBlock(), - ] diff --git a/template.yaml b/template.yaml index 1ff977f..ef76c2b 100644 --- a/template.yaml +++ b/template.yaml @@ -1,8 +1,11 @@ AWSTemplateFormatVersion: "2010-09-09" Transform: AWS::Serverless-2016-10-31 -Description: syncbot-build-template +Description: > + SyncBot - Slack app that syncs posts and replies across workspaces. + Free-tier compatible: Lambda, API Gateway, RDS db.t3.micro, S3. + All infrastructure resources (RDS, VPC, S3 buckets) can optionally + point at existing instances so multiple apps can share one account. -# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst Globals: Function: Timeout: 10 @@ -10,128 +13,636 @@ Globals: Tracing: Active Api: TracingEnabled: true + MethodSettings: + - ResourcePath: "/*" + HttpMethod: "*" + ThrottlingBurstLimit: 20 + ThrottlingRateLimit: 10 + +# ================================================================ +# Parameters +# ================================================================ Parameters: - SlackToken: - Description: Slack token that is passed by the bot + Stage: + Description: Deployment stage Type: String - Default: "123" + Default: staging + AllowedValues: + - staging + - prod + + # --- Slack --- + SlackSigningSecret: - Description: Auth token used to pass to the controller to get the commands + Description: Slack signing secret for request verification Type: String + NoEcho: true Default: "123" + SlackClientSecret: - Description: Auth token used to pass to the controller to get the commands + Description: Slack OAuth client secret Type: String + NoEcho: true Default: "123" + SlackOauthScopes: - 
Description: Auth token used to pass to the controller to get the commands + Description: Comma-separated list of Slack OAuth scopes Type: String Default: "app_mentions:read,channels:history,channels:join,chat:write,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write" - Stage: - Description: Parameter for getting the deployment stage - Type: String - Default: staging - DatabaseHost: - Description: RDS Database Host + + # --- Database (RDS) --- + + ExistingDatabaseHost: + Description: > + Endpoint of an existing RDS instance (e.g. mydb.xxxx.us-east-2.rds.amazonaws.com). + Leave EMPTY to create a new RDS instance. When set, all VPC and RDS + resources are skipped. Type: String - Default: "123" + Default: "" + DatabaseUser: - Description: RDS Database User + Description: Database username (master user if creating new RDS, or an existing user) Type: String - Default: "moneyball" + DatabasePassword: - Description: RDS Database Password + Description: Database password Type: String - Default: "123" + NoEcho: true + MinLength: 8 + DatabaseSchema: - Description: RDS Database Schema + Description: > + MySQL database/schema name. Each app sharing an RDS instance + should use a different schema name. Type: String Default: "syncbot" + + DatabaseInstanceClass: + Description: "RDS instance class (db.t3.micro is free-tier eligible). Ignored when using an existing database." + Type: String + Default: db.t3.micro + AllowedValues: + - db.t3.micro + - db.t3.small + - db.t3.medium + - db.t4g.micro + - db.t4g.small + + AllowedDBCidr: + Description: > + CIDR allowed to reach the database (e.g. your IP as x.x.x.x/32). + Ignored when using an existing database. + Type: String + Default: "0.0.0.0/0" + + VpcCidr: + Description: CIDR block for the VPC. Ignored when using an existing database. 
+ Type: String + Default: "10.0.0.0/16" + + # --- S3 Buckets --- + + ExistingSlackStateBucket: + Description: > + Name of an existing S3 bucket for Slack OAuth state. + Leave EMPTY to create a new bucket. + Type: String + Default: "" + + ExistingInstallationBucket: + Description: > + Name of an existing S3 bucket for Slack installation data. + Leave EMPTY to create a new bucket. + Type: String + Default: "" + + ExistingImagesBucket: + Description: > + Name of an existing S3 bucket for synced images. + Leave EMPTY to create a new bucket. The bucket must allow + public reads if you want images to render in Slack. + Type: String + Default: "" + + # --- Security --- + PasswordEncryptKey: - Description: Hash encrypt key for decrypting email passwords + Description: Encryption key for sensitive data Type: String + NoEcho: true Default: "123" + RequireAdmin: + Description: > + When "true" (default), only workspace admins and owners can + configure SyncBot. Set to "false" to allow any user. + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + +# ================================================================ +# Conditions +# ================================================================ + +Conditions: + CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] + CreateSlackStateBucket: !Equals [!Ref ExistingSlackStateBucket, ""] + CreateInstallationBucket: !Equals [!Ref ExistingInstallationBucket, ""] + CreateImagesBucket: !Equals [!Ref ExistingImagesBucket, ""] + Mappings: StagesMap: staging: SlackClientID: "1966318390773.6037875913205" - SlackStateS3Bucket: "slack-state-bucket" - SlackInstallS3Bucket: "slack-installation-bucket" KeepWarmName: "SyncBotKeepWarmTest" prod: SlackClientID: "1990266264068.6053437451057" - SlackStateS3Bucket: "slack-state-bucket" - SlackInstallS3Bucket: "slack-installation-bucket" KeepWarmName: "SyncBotKeepWarmProd" Resources: + # ============================================================ + # Networking + # + # Minimal 
VPC for RDS (AWS requires RDS to live in a VPC). + # Only public subnets — no NAT Gateway, no private subnets. + # Lambda runs OUTSIDE the VPC and connects to RDS over its + # public endpoint, keeping the architecture free-tier friendly. + # + # Skipped entirely when ExistingDatabaseHost is provided. + # ============================================================ + + VPC: + Type: AWS::EC2::VPC + Condition: CreateDatabase + Properties: + CidrBlock: !Ref VpcCidr + EnableDnsHostnames: true + EnableDnsSupport: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-vpc" + + InternetGateway: + Type: AWS::EC2::InternetGateway + Condition: CreateDatabase + Properties: + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-igw" + + VPCGatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref InternetGateway + + PublicSubnet1: + Type: AWS::EC2::Subnet + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + CidrBlock: !Select [0, !Cidr [!Ref VpcCidr, 4, 8]] + AvailabilityZone: !Select [0, !GetAZs ""] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-1" + + PublicSubnet2: + Type: AWS::EC2::Subnet + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + CidrBlock: !Select [1, !Cidr [!Ref VpcCidr, 4, 8]] + AvailabilityZone: !Select [1, !GetAZs ""] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-2" + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-rt" + + PublicRoute: + Type: AWS::EC2::Route + Condition: CreateDatabase + DependsOn: VPCGatewayAttachment + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: "0.0.0.0/0" + GatewayId: !Ref InternetGateway + + PublicSubnet1RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Condition: 
CreateDatabase + Properties: + SubnetId: !Ref PublicSubnet1 + RouteTableId: !Ref PublicRouteTable + + PublicSubnet2RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Condition: CreateDatabase + Properties: + SubnetId: !Ref PublicSubnet2 + RouteTableId: !Ref PublicRouteTable + + # ============================================================ + # Security Groups (skipped when using existing database) + # ============================================================ + + RDSSecurityGroup: + Type: AWS::EC2::SecurityGroup + Condition: CreateDatabase + Properties: + GroupDescription: Controls access to the SyncBot RDS instance + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 3306 + ToPort: 3306 + CidrIp: !Ref AllowedDBCidr + Description: "MySQL access (Lambda connects over public internet)" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-rds-sg" + + # ============================================================ + # RDS MySQL Database (free-tier: db.t3.micro, 20 GB gp2) + # + # Skipped entirely when ExistingDatabaseHost is provided. 
+ # ============================================================ + + RDSParameterGroup: + Type: AWS::RDS::DBParameterGroup + Condition: CreateDatabase + Properties: + Family: mysql8.0 + Description: !Sub "SyncBot ${Stage} - enforces SSL connections" + Parameters: + require_secure_transport: "1" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-params" + + DBSubnetGroup: + Type: AWS::RDS::DBSubnetGroup + Condition: CreateDatabase + Properties: + DBSubnetGroupDescription: Subnet group for SyncBot RDS + SubnetIds: + - !Ref PublicSubnet1 + - !Ref PublicSubnet2 + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-subnet-group" + + RDSInstance: + Type: AWS::RDS::DBInstance + Condition: CreateDatabase + DeletionPolicy: Snapshot + UpdateReplacePolicy: Snapshot + Properties: + DBInstanceIdentifier: !Sub "syncbot-${Stage}" + DBInstanceClass: !Ref DatabaseInstanceClass + Engine: mysql + EngineVersion: "8.0" + MasterUsername: !Ref DatabaseUser + MasterUserPassword: !Ref DatabasePassword + DBName: !Ref DatabaseSchema + AllocatedStorage: 20 + StorageType: gp2 + StorageEncrypted: true + PubliclyAccessible: true + MultiAZ: false + DBSubnetGroupName: !Ref DBSubnetGroup + DBParameterGroupName: !Ref RDSParameterGroup + VPCSecurityGroups: + - !Ref RDSSecurityGroup + BackupRetentionPeriod: 7 + PreferredBackupWindow: "03:00-04:00" + PreferredMaintenanceWindow: "sun:04:00-sun:05:00" + DeletionProtection: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db" + + # ============================================================ + # S3 Buckets (free-tier: 5 GB total, 20 000 GET, 2 000 PUT) + # + # Each bucket is skipped when an existing bucket name is provided. 
+ # ============================================================ + + SlackStateBucket: + Type: AWS::S3::Bucket + Condition: CreateSlackStateBucket + Properties: + BucketName: !Sub "syncbot-${Stage}-slack-state-${AWS::AccountId}" + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + PublicAccessBlockConfiguration: + BlockPublicAcls: true + BlockPublicPolicy: true + IgnorePublicAcls: true + RestrictPublicBuckets: true + LifecycleConfiguration: + Rules: + - Id: ExpireOAuthState + Status: Enabled + ExpirationInDays: 1 + + SlackInstallationBucket: + Type: AWS::S3::Bucket + Condition: CreateInstallationBucket + Properties: + BucketName: !Sub "syncbot-${Stage}-slack-installations-${AWS::AccountId}" + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + PublicAccessBlockConfiguration: + BlockPublicAcls: true + BlockPublicPolicy: true + IgnorePublicAcls: true + RestrictPublicBuckets: true + VersioningConfiguration: + Status: Enabled + + SyncBotImagesBucket: + Type: AWS::S3::Bucket + Condition: CreateImagesBucket + Properties: + BucketName: !Sub "syncbot-${Stage}-images-${AWS::AccountId}" + BucketEncryption: + ServerSideEncryptionConfiguration: + - ServerSideEncryptionByDefault: + SSEAlgorithm: AES256 + PublicAccessBlockConfiguration: + BlockPublicAcls: false + BlockPublicPolicy: false + IgnorePublicAcls: false + RestrictPublicBuckets: false + LifecycleConfiguration: + Rules: + - Id: ExpireOldImages + Status: Enabled + ExpirationInDays: 90 + + SyncBotImagesBucketPolicy: + Type: AWS::S3::BucketPolicy + Condition: CreateImagesBucket + Properties: + Bucket: !Ref SyncBotImagesBucket + PolicyDocument: + Version: "2012-10-17" + Statement: + - Sid: PublicReadGetObject + Effect: Allow + Principal: "*" + Action: "s3:GetObject" + Resource: !Sub "${SyncBotImagesBucket.Arn}/*" + + # ============================================================ + # Lambda Function 
(free-tier: 1M requests, 400 000 GB-s) + # + # Runs OUTSIDE the VPC so it can reach the Slack API and RDS + # public endpoint without a NAT Gateway. + # + # Each app gets its own Lambda function — this is inherent to + # the serverless model and doesn't affect free-tier billing + # (the 1M request quota is shared across ALL functions). + # ============================================================ + SyncBotFunction: - Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction + Type: AWS::Serverless::Function Properties: CodeUri: syncbot/ Handler: app.handler Runtime: python3.11 Architectures: - x86_64 + Timeout: 30 + MemorySize: 128 Policies: - - AmazonS3FullAccess - - AWSLambdaRole - - AmazonEventBridgeFullAccess - Timeout: 400 + - S3CrudPolicy: + BucketName: !If + - CreateSlackStateBucket + - !Ref SlackStateBucket + - !Ref ExistingSlackStateBucket + - S3CrudPolicy: + BucketName: !If + - CreateInstallationBucket + - !Ref SlackInstallationBucket + - !Ref ExistingInstallationBucket + - S3CrudPolicy: + BucketName: !If + - CreateImagesBucket + - !Ref SyncBotImagesBucket + - !Ref ExistingImagesBucket Events: SyncBot: - Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api + Type: Api Properties: Path: /slack/events Method: post SyncBotInstall: - Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api + Type: Api Properties: Path: /slack/install Method: get + SyncBotOAuthRedirect: + Type: Api + Properties: + Path: /slack/oauth_redirect + Method: get SyncBotKeepWarm: Type: ScheduleV2 Properties: ScheduleExpression: "rate(5 minutes)" Name: !FindInMap - StagesMap - - Ref: Stage + - !Ref Stage - KeepWarmName Environment: Variables: - SLACK_BOT_TOKEN: !Ref SlackToken + 
SLACK_BOT_TOKEN: "123" # NOTE(review): hardcoded placeholder replacing !Ref SlackToken — presumably per-workspace bot tokens now come from the S3 installation store; confirm this env var is unused at runtime, otherwise restore !Ref SlackToken
+ # ============================================================ + + LambdaErrorAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-errors" + AlarmDescription: > + Fires when the SyncBot Lambda function produces 3 or more + errors within a 5-minute window. + Namespace: AWS/Lambda + MetricName: Errors + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 3 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + LambdaThrottleAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-throttles" + AlarmDescription: > + Fires when the SyncBot Lambda function is throttled, + indicating a concurrency limit has been reached. + Namespace: AWS/Lambda + MetricName: Throttles + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 1 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + LambdaDurationAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-duration" + AlarmDescription: > + Fires when average Lambda duration exceeds 10 seconds, + indicating potential performance degradation. + Namespace: AWS/Lambda + MetricName: Duration + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Average + Period: 300 + EvaluationPeriods: 2 + Threshold: 10000 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + ApiGateway5xxAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-api-5xx" + AlarmDescription: > + Fires when the API Gateway returns 5 or more 5xx errors + within a 5-minute window. 
+ Namespace: AWS/ApiGateway + MetricName: 5XXError + Dimensions: + - Name: ApiName + Value: !Ref ServerlessRestApi + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 5 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + +# ================================================================ +# Outputs +# ================================================================ Outputs: - # ServerlessRestApi is an implicit API created out of Events key under Serverless::Function - # Find out more about other implicit resources you can reference within SAM - # https://github.com/awslabs/serverless-application-model/blob/master/docs/internals/generated_resources.rst#api - SyncBotApi: - Description: API Gateway endpoint URL for Prod stage for SyncBot function + SyncBotApiUrl: + Description: API Gateway endpoint URL Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/events/" - SyncBotFunction: - Description: SyncBot Lambda Function ARN + + SyncBotInstallUrl: + Description: Slack app installation URL + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/install" + + SyncBotFunctionArn: + Description: SyncBot Lambda function ARN Value: !GetAtt SyncBotFunction.Arn - SyncBotFunctionIamRole: - Description: Implicit IAM Role created for Hello World function - Value: !GetAtt SyncBotFunctionRole.Arn + + DatabaseHostInUse: + Description: Database host the Lambda is configured to connect to + Value: !If + - CreateDatabase + - !GetAtt RDSInstance.Endpoint.Address + - !Ref ExistingDatabaseHost + + RDSEndpoint: + Condition: CreateDatabase + Description: RDS MySQL endpoint address (only when RDS is created by this stack) + Value: !GetAtt RDSInstance.Endpoint.Address + + RDSPort: + Condition: CreateDatabase + Description: RDS MySQL port (only when RDS is created by this stack) + Value: !GetAtt RDSInstance.Endpoint.Port + + VpcId: + Condition: CreateDatabase + 
Description: VPC ID (only when VPC is created by this stack) + Value: !Ref VPC + + ImagesBucketName: + Description: S3 bucket name for synced images + Value: !If + - CreateImagesBucket + - !Ref SyncBotImagesBucket + - !Ref ExistingImagesBucket + + ImagesBucketUrl: + Description: Public URL for synced images + Value: !If + - CreateImagesBucket + - !Sub "https://${SyncBotImagesBucket}.s3.amazonaws.com/" + - !Sub "https://${ExistingImagesBucket}.s3.amazonaws.com/" diff --git a/tests/test_db.py b/tests/test_db.py new file mode 100644 index 0000000..9408c5e --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,105 @@ +"""Unit tests for syncbot.utils.db connection pooling and retry logic.""" + +import os +from unittest.mock import patch + +import pytest + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from sqlalchemy.exc import OperationalError + +from db import _MAX_RETRIES, _with_retry + +# ----------------------------------------------------------------------- +# _with_retry decorator +# ----------------------------------------------------------------------- + + +class TestWithRetry: + def test_success_no_retry(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + return "ok" + + assert fn() == "ok" + assert call_count == 1 + + def test_retries_on_operational_error(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + if call_count <= _MAX_RETRIES: + raise OperationalError("statement", {}, Exception("connection lost")) + return "recovered" + + assert fn() == "recovered" + assert call_count == _MAX_RETRIES + 1 + + def test_exhausts_retries_raises(self): + @_with_retry + def fn(): + raise OperationalError("statement", {}, Exception("connection lost")) + + with 
pytest.raises(OperationalError): + fn() + + def test_non_operational_error_not_retried(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + raise ValueError("not a db error") + + with pytest.raises(ValueError): + fn() + assert call_count == 1 + + +# ----------------------------------------------------------------------- +# Engine creation uses QueuePool +# ----------------------------------------------------------------------- + + +class TestEngineConfig: + @patch.dict( + os.environ, + { + "DATABASE_HOST": "localhost", + "ADMIN_DATABASE_USER": "root", + "ADMIN_DATABASE_PASSWORD": "test", + "ADMIN_DATABASE_SCHEMA": "syncbot", + }, + ) + def test_engine_uses_queue_pool(self): + from sqlalchemy.pool import QueuePool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = get_engine(schema="test_schema_unique") + assert isinstance(engine.pool, QueuePool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema diff --git a/tests/test_handlers.py b/tests/test_handlers.py new file mode 100644 index 0000000..dd4edce --- /dev/null +++ b/tests/test_handlers.py @@ -0,0 +1,347 @@ +"""Unit tests for syncbot.utils.handlers event parsing and dispatch.""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers import ( + EventContext, + _is_own_bot_message, + _parse_event_fields, + _sanitize_text, +) +from handlers.groups import _generate_invite_code + +# ----------------------------------------------------------------------- +# 
_parse_event_fields +# ----------------------------------------------------------------------- + + +class TestParseEventFields: + def _make_client(self): + client = MagicMock() + client.users_info.return_value = { + "user": { + "id": "U123", + "profile": {"display_name": "TestUser", "real_name": "Test User"}, + } + } + return client + + def test_basic_message(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "channel": "C001", + "user": "U001", + "text": "Hello world", + "ts": "1234567890.000001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["team_id"] == "T001" + assert ctx["channel_id"] == "C001" + assert ctx["user_id"] == "U001" + assert ctx["msg_text"] == "Hello world" + assert ctx["event_subtype"] is None + + def test_empty_text_defaults_to_space(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "channel": "C001", + "user": "U001", + "ts": "1234567890.000001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["msg_text"] == " " + + def test_message_changed_subtype(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "message_changed", + "channel": "C001", + "message": { + "user": "U001", + "text": "Edited text", + "ts": "1234567890.000001", + }, + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["event_subtype"] == "message_changed" + assert ctx["msg_text"] == "Edited text" + assert ctx["user_id"] == "U001" + + def test_message_deleted_subtype(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "message_deleted", + "channel": "C001", + "previous_message": { + "ts": "1234567890.000001", + }, + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["event_subtype"] == "message_deleted" + assert ctx["ts"] == "1234567890.000001" + + +# ----------------------------------------------------------------------- +# EventContext TypedDict +# 
----------------------------------------------------------------------- + + +class TestEventContextType: + def test_event_context_is_dict(self): + ctx = EventContext( + team_id="T1", + channel_id="C1", + user_id="U1", + msg_text="hi", + mentioned_users=[], + thread_ts=None, + ts="123.456", + event_subtype=None, + ) + assert isinstance(ctx, dict) + assert ctx["team_id"] == "T1" + + +# ----------------------------------------------------------------------- +# _sanitize_text +# ----------------------------------------------------------------------- + + +class TestSanitizeText: + def test_strips_whitespace(self): + assert _sanitize_text(" hello ") == "hello" + + def test_truncates_long_text(self): + result = _sanitize_text("a" * 200, max_length=100) + assert len(result) == 100 + + def test_none_passthrough(self): + assert _sanitize_text(None) is None + + def test_empty_string_passthrough(self): + assert _sanitize_text("") == "" + + def test_custom_max_length(self): + result = _sanitize_text("abcdefgh", max_length=5) + assert result == "abcde" + + +# ----------------------------------------------------------------------- +# _is_own_bot_message +# ----------------------------------------------------------------------- + + +class TestIsOwnBotMessage: + def _make_client_with_bot_id(self, bot_id: str = "B_SYNCBOT"): + client = MagicMock() + client.auth_test.return_value = {"bot_id": bot_id} + return client + + def test_own_bot_message_detected(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_SYNCBOT", "text": "synced"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is True + + def test_other_bot_message_not_flagged(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_OTHER", "text": "hello"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert 
_is_own_bot_message(body, client, context) is False + + def test_regular_user_message_not_flagged(self): + body = {"event": {"type": "message", "user": "U001", "text": "hello"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is False + + def test_own_bot_in_message_changed(self): + body = { + "event": { + "type": "message", + "subtype": "message_changed", + "channel": "C001", + "message": {"bot_id": "B_SYNCBOT", "subtype": "bot_message", "text": "edited"}, + }, + } + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is True + + def test_other_bot_in_message_changed(self): + body = { + "event": { + "type": "message", + "subtype": "message_changed", + "channel": "C001", + "message": {"bot_id": "B_OTHER", "subtype": "bot_message", "text": "edited"}, + }, + } + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is False + + def test_fallback_to_auth_test_when_context_empty(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_SYNCBOT", "text": "hi"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {} + assert _is_own_bot_message(body, client, context) is True + + +class TestParseEventFieldsBotMessage: + def _make_client(self): + client = MagicMock() + client.users_info.return_value = { + "user": {"id": "U123", "profile": {"display_name": "TestUser", "real_name": "Test User"}} + } + return client + + def test_bot_message_has_no_user_id(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B_OTHER", + "username": "WeatherBot", + "text": "Today's forecast", + "ts": "1234567890.000001", + "channel": "C001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["user_id"] is 
None + assert ctx["event_subtype"] == "bot_message" + assert ctx["msg_text"] == "Today's forecast" + + +# ----------------------------------------------------------------------- +# _generate_invite_code +# ----------------------------------------------------------------------- + + +class TestGenerateInviteCode: + def test_code_format(self): + code = _generate_invite_code() + assert len(code) == 8 # 3 + dash + 4 + assert code[3] == "-" + assert code[:3].isalnum() + assert code[4:].isalnum() + + def test_code_is_uppercase(self): + code = _generate_invite_code() + assert code == code.upper() + + def test_codes_are_unique(self): + codes = {_generate_invite_code() for _ in range(50)} + assert len(codes) > 45 + + def test_custom_length(self): + code = _generate_invite_code(length=8) + assert len(code) == 9 # 3 + dash + 5 + assert code[3] == "-" + + +# ----------------------------------------------------------------------- +# Invite code normalisation (same logic as group invite code) +# ----------------------------------------------------------------------- + + +class TestInviteCodeValidation: + def test_code_normalisation_adds_dash(self): + raw = "a7xk9m" + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + def test_code_already_formatted(self): + raw = "A7X-K9M" + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + def test_code_with_whitespace(self): + raw = " a7x-k9m " + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + +# ----------------------------------------------------------------------- +# get_request_type — group prefix matching +# 
----------------------------------------------------------------------- + + +class TestRequestTypeGroupPrefix: + def test_leave_group_prefix_resolved(self): + from helpers import get_request_type + from slack import actions + + body = { + "type": "block_actions", + "actions": [{"action_id": f"{actions.CONFIG_LEAVE_GROUP}_42"}], + } + req_type, req_id = get_request_type(body) + assert req_type == "block_actions" + assert req_id == actions.CONFIG_LEAVE_GROUP + + +# ----------------------------------------------------------------------- +# handle_new_sync_submission (unit-level: verifies the handler wiring) +# ----------------------------------------------------------------------- + + +class TestNewSyncSubmission: + """Verify that handle_new_sync_submission uses conversations.info to get the channel name.""" + + def test_rejects_unauthorized_user(self): + from handlers import handle_new_sync_submission + + client = MagicMock() + client.users_info.return_value = {"user": {"is_admin": False, "is_owner": False}} + body = {"view": {"team_id": "T001"}, "user": {"id": "U001"}} + logger = MagicMock() + + with patch("handlers.sync.helpers.is_user_authorized", return_value=False): + handle_new_sync_submission(body, client, logger, {}) + + client.conversations_info.assert_not_called() + client.conversations_join.assert_not_called() + + def test_rejects_missing_channel_id(self): + from handlers import handle_new_sync_submission + + client = MagicMock() + body = {"view": {"team_id": "T001"}, "user": {"id": "U001"}} + logger = MagicMock() + + with ( + patch("handlers.sync.helpers.is_user_authorized", return_value=True), + patch("handlers.sync.forms.NEW_SYNC_FORM") as mock_form, + ): + mock_form.get_selected_values.return_value = {} + handle_new_sync_submission(body, client, logger, {}) + + client.conversations_info.assert_not_called() diff --git a/tests/test_helpers.py b/tests/test_helpers.py new file mode 100644 index 0000000..2b1e4b4 --- /dev/null +++ b/tests/test_helpers.py @@ 
+"""Unit tests for syncbot.helpers core utility functions."""
----------------------------------------------------------------------- + + +class TestEncryption: + @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "my-secret-key"}) + def test_encrypt_decrypt_roundtrip(self): + # Use a non-secret placeholder; encryption accepts any string + token = "xoxb-0-0" + encrypted = helpers.encrypt_bot_token(token) + assert encrypted != token + decrypted = helpers.decrypt_bot_token(encrypted) + assert decrypted == token + + @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "my-secret-key"}) + def test_decrypt_invalid_token_raises(self): + with pytest.raises(ValueError, match="decryption failed"): + helpers.decrypt_bot_token("not-a-valid-encrypted-token") + + @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "123"}) + def test_encryption_disabled_with_default_key(self): + token = "xoxb-0-0" + assert helpers.encrypt_bot_token(token) == token + assert helpers.decrypt_bot_token(token) == token + + @patch.dict(os.environ, {}, clear=False) + def test_encryption_disabled_when_key_missing(self): + os.environ.pop("PASSWORD_ENCRYPT_KEY", None) + token = "xoxb-0-0" + assert helpers.encrypt_bot_token(token) == token + assert helpers.decrypt_bot_token(token) == token + + @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "key-A"}) + def test_wrong_key_raises(self): + token = "xoxb-0-0" + encrypted = helpers.encrypt_bot_token(token) + + with ( + patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "key-B"}), + pytest.raises(ValueError, match="decryption failed"), + ): + helpers.decrypt_bot_token(encrypted) + + +# ----------------------------------------------------------------------- +# In-process cache +# ----------------------------------------------------------------------- + + +class TestCache: + def setup_method(self): + helpers._CACHE.clear() + + def test_cache_set_and_get(self): + helpers._cache_set("k1", "value1") + assert helpers._cache_get("k1") == "value1" + + def test_cache_miss(self): + assert helpers._cache_get("nonexistent") is None + + def 
test_cache_expiry(self): + helpers._cache_set("k2", "value2", ttl=0) + time.sleep(0.01) + assert helpers._cache_get("k2") is None + + def test_cache_within_ttl(self): + helpers._cache_set("k3", "value3", ttl=60) + assert helpers._cache_get("k3") == "value3" + + +# ----------------------------------------------------------------------- +# get_request_type +# ----------------------------------------------------------------------- + + +class TestGetRequestType: + def test_event_callback(self): + body = {"type": "event_callback", "event": {"type": "message"}} + assert helpers.get_request_type(body) == ("event_callback", "message") + + def test_view_submission(self): + body = {"type": "view_submission", "view": {"callback_id": "my_callback"}} + assert helpers.get_request_type(body) == ("view_submission", "my_callback") + + def test_command(self): + body = {"command": "/config-syncbot"} + assert helpers.get_request_type(body) == ("command", "/config-syncbot") + + def test_unknown(self): + body = {"type": "something_else"} + assert helpers.get_request_type(body) == ("unknown", "unknown") + + +# ----------------------------------------------------------------------- +# slack_retry decorator +# ----------------------------------------------------------------------- + + +# ----------------------------------------------------------------------- +# get_bot_info_from_event +# ----------------------------------------------------------------------- + + +class TestGetBotInfoFromEvent: + def test_extracts_username_and_icon(self): + body = { + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B123", + "username": "WeatherBot", + "icons": {"image_48": "https://example.com/icon48.png"}, + "text": "hello", + } + } + name, icon = helpers.get_bot_info_from_event(body) + assert name == "WeatherBot" + assert icon == "https://example.com/icon48.png" + + def test_fallback_name_when_no_username(self): + body = {"event": {"type": "message", "subtype": "bot_message", 
"bot_id": "B123", "text": "hello"}} + name, icon = helpers.get_bot_info_from_event(body) + assert name == "Bot" + assert icon is None + + def test_icon_fallback_order(self): + body = { + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B123", + "username": "MyBot", + "icons": {"image_36": "https://example.com/icon36.png", "image_72": "https://example.com/icon72.png"}, + "text": "hello", + } + } + name, icon = helpers.get_bot_info_from_event(body) + assert icon == "https://example.com/icon36.png" + + +# ----------------------------------------------------------------------- +# slack_retry decorator +# ----------------------------------------------------------------------- + + +class TestSlackRetry: + def test_success_on_first_try(self): + @helpers.slack_retry + def fn(): + return "ok" + + assert fn() == "ok" + + def test_retries_on_429(self): + from slack_sdk.errors import SlackApiError + + call_count = 0 + + mock_response = MagicMock() + mock_response.status_code = 429 + mock_response.headers = {"Retry-After": "0"} + + @helpers.slack_retry + def fn(): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise SlackApiError("rate_limited", response=mock_response) + return "ok" + + assert fn() == "ok" + assert call_count == 3 + + def test_non_retryable_error_raises_immediately(self): + from slack_sdk.errors import SlackApiError + + mock_response = MagicMock() + mock_response.status_code = 404 + + @helpers.slack_retry + def fn(): + raise SlackApiError("not_found", response=mock_response) + + with pytest.raises(SlackApiError): + fn() From 613c83f832c1fb82e87f8fc9ae1fe4f07053d20a Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 9 Mar 2026 19:55:12 -0500 Subject: [PATCH 02/45] Code cleanup and debugging. Fixed cross-workspace cache bug. Updated variable naming conventions to make it more readable. Split up README into separate docs. 
--- .env.example | 7 +- IMPROVEMENTS.md | 42 ++- README.md | 434 ++++++------------------------- docs/API_REFERENCE.md | 30 +++ docs/BACKUP_AND_MIGRATION.md | 23 ++ docs/DEPLOYMENT.md | 81 ++++++ docs/USER_GUIDE.md | 62 +++++ syncbot/app.py | 5 - syncbot/builders/_common.py | 6 +- syncbot/builders/channel_sync.py | 12 +- syncbot/builders/home.py | 79 +++--- syncbot/builders/user_mapping.py | 12 +- syncbot/constants.py | 4 +- syncbot/db/__init__.py | 6 +- syncbot/federation/api.py | 54 ++-- syncbot/handlers/__init__.py | 4 + syncbot/handlers/channel_sync.py | 100 +++---- syncbot/handlers/group_manage.py | 20 +- syncbot/handlers/groups.py | 52 ++-- syncbot/handlers/sync.py | 144 ++++++++-- syncbot/handlers/tokens.py | 32 +-- syncbot/handlers/users.py | 22 +- syncbot/helpers/__init__.py | 2 + syncbot/helpers/_cache.py | 7 + syncbot/helpers/export_import.py | 46 ++-- syncbot/helpers/notifications.py | 16 +- syncbot/helpers/workspace.py | 30 +-- syncbot/routing.py | 2 + syncbot/slack/actions.py | 10 + 29 files changed, 701 insertions(+), 643 deletions(-) create mode 100644 docs/API_REFERENCE.md create mode 100644 docs/BACKUP_AND_MIGRATION.md create mode 100644 docs/DEPLOYMENT.md create mode 100644 docs/USER_GUIDE.md diff --git a/.env.example b/.env.example index fa618e8..9200e78 100644 --- a/.env.example +++ b/.env.example @@ -15,9 +15,10 @@ ADMIN_DATABASE_USER=root ADMIN_DATABASE_PASSWORD=rootpass ADMIN_DATABASE_SCHEMA=syncbot -# DANGER: When set to true, app startup DROPS the database and reinitializes -# from db/init.sql. All data is lost. Only for local/dev reset. -# DANGER_DROP_AND_INIT_DB=false +# When true, a "Reset Database" button appears on the Home tab. +# Clicking it opens a confirmation modal, then drops and reinitializes the DB +# from db/init.sql. All data is lost. Only for local/dev use. 
+# ENABLE_DB_RESET=false # ----------------------------------------------------------------------------- # Local Development Mode diff --git a/IMPROVEMENTS.md b/IMPROVEMENTS.md index a7f39d2..872e690 100644 --- a/IMPROVEMENTS.md +++ b/IMPROVEMENTS.md @@ -197,7 +197,7 @@ This document outlines the improvements made to the SyncBot application and addi - A workspace "publishes" one of its channels to a specific paired workspace, making it available for syncing - The paired workspace "subscribes" by selecting one of their own channels to receive messages - Each publish is scoped to exactly one pairing — publishing to workspace B and workspace C are separate operations - - Channel Sync modal shows: published channels (with Unpublish buttons), available channels from partner (with Subscribe buttons), and a Publish Channel button + - Channel Sync modal shows: published channels (with Unpublish buttons), available channels from other group members (with Subscribe buttons), and a Publish Channel button - Welcome messages are posted in both channels when a subscription is established - Unpublishing cleans up both sides (soft-deletes SyncChannels, bot leaves channels) - **Database changes** — added `pairing_id` column to `syncs` table (FK to `workspace_pairings`, `ON DELETE CASCADE`), removed UNIQUE constraint on `syncs.title` (same channel can be published to multiple pairings) @@ -239,11 +239,11 @@ This document outlines the improvements made to the SyncBot application and addi - **Automatic reinstall recovery** — if the workspace reinstalls within the retention period, all pairings and sync channels are automatically restored - **Lifecycle notifications** — consistent notification model using channel messages and admin DMs: - **Started** — new pairing activated: admin DMs in both workspaces - - **Paused** — workspace uninstalls: admin DMs + channel messages in partner workspace - - **Resumed** — workspace reinstalls: admin DMs + channel messages in partner workspace - - 
**Stopped** — manual removal: admin DMs + channel messages in partner workspace - - **Purged** — auto-cleanup after retention period: admin DMs to partner workspace -- **Paused indicator** — Home tab and pairing form show `:double_vertical_bar: Paused (uninstalled)` for soft-deleted partner workspaces with no action buttons + - **Paused** — workspace uninstalls: admin DMs + channel messages in member workspaces + - **Resumed** — workspace reinstalls: admin DMs + channel messages in member workspaces + - **Stopped** — manual removal: admin DMs + channel messages in member workspaces + - **Purged** — auto-cleanup after retention period: admin DMs to member workspaces +- **Paused indicator** — Home tab and pairing form show `:double_vertical_bar: Paused (uninstalled)` for soft-deleted member workspaces with no action buttons - **Configurable retention** — `SOFT_DELETE_RETENTION_DAYS` env var (default 30 days) controls how long soft-deleted data is kept before permanent purge - **Lazy daily purge** — stale soft-deleted workspaces are hard-deleted via `ON DELETE CASCADE` during the first `app_home_opened` event each day - **Manifest updated** — added `tokens_revoked` to bot events, `im:write` to OAuth scopes @@ -302,7 +302,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Sync lifecycle controls** — individual channel syncs can be paused, resumed, or stopped from the Home tab - **`status` column** on `sync_channels` — supports `active` and `paused` states - **Paused syncs** — messages, threads, edits, deletes, and reactions are not processed for paused channels; the handler checks `status` before dispatching -- **Stop with confirmation** — stopping a sync shows a confirmation modal before soft-deleting; the bot leaves the channel and notifies the partner workspace +- **Stop with confirmation** — stopping a sync shows a confirmation modal before soft-deleting; the bot leaves the channel and notifies other member workspaces - 
**Admin attribution** — pause/resume/stop actions are attributed to the admin who performed them in notification messages - **Home tab indicators** — paused syncs show a `:double_vertical_bar: Paused` status on the Home tab with a Resume button @@ -318,8 +318,8 @@ This document outlines the improvements made to the SyncBot application and addi ### 36. Direct Pairing Requests (Completed) - **Request-based pairing** — admins can send a direct pairing request to another workspace instead of manually sharing codes -- **DM notifications** — the partner workspace's admins receive a DM with Accept/Decline buttons and context about the requesting workspace -- **Home tab notification** — pending inbound pairing requests are shown on the partner's Home tab with Accept/Decline buttons +- **DM notifications** — the target workspace's admins receive a DM with Accept/Decline buttons and context about the requesting workspace +- **Home tab notification** — pending inbound pairing requests are shown on the target workspace's Home tab with Accept/Decline buttons - **Bidirectional activation** — accepting a request activates the pairing on both sides, refreshes user directories, runs auto-matching, and updates both Home tabs - **DM cleanup** — pairing request DMs are replaced with updated status messages when accepted, declined, or cancelled @@ -328,7 +328,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Message count** — each sync displays the number of tracked messages from `PostMeta` (e.g., "Synced since: February 18, 2026 · 42 messages tracked") - **Remote channel deep links** — target channel names in the Home tab and subscription modals are rendered as deep links using `slack://channel?team=T...&id=C...` URLs - **Consolidated published channels** — all synced channels across pairings are shown in a single sorted list on the Home tab -- **Partner Home tab refresh** — all mutations (publish, unpublish, subscribe, pause, resume, stop, 
pairing changes) automatically re-publish the partner workspace's Home tab +- **Member Home tab refresh** — all mutations (publish, unpublish, subscribe, pause, resume, stop, pairing changes) automatically re-publish every affected group member's Home tab ### 38. User Mapping Screen Redesign (Completed) - **Dedicated Home tab screen** — user mapping is now a full-screen Home tab view instead of a nested modal, providing more space and a better experience @@ -414,7 +414,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Cached built blocks** — After a full refresh, the built Block Kit payload is cached (in-process, keyed by team/user and optionally group for User Mapping). When the hash matches, the app re-publishes that cached view with one `views.publish` instead of re-running all DB and Slack calls. - **60-second cooldown** — If the user clicks Refresh again within 60 seconds and the hash is unchanged, the app re-publishes the cached view with a context message: "No new data. Wait __ seconds before refreshing again." The displayed seconds are the current remaining time from the last refresh (recomputed on each click). Cooldown constant: `REFRESH_COOLDOWN_SECONDS` (default 60) in `constants.py`. - **Request-scoped caching** — `get_workspace_by_id(workspace_id, context=None)` and `get_admin_ids(client, team_id=None, context=None)` use the request `context` dict when provided: one DB read per distinct workspace, one `users.list` per distinct team per request. Reduces duplicate lookups when building the Home tab or when multiple workspaces' Home tabs are refreshed in one invocation. -- **Context through push-refresh paths** — When a change in one workspace triggers Home tab refreshes in others (e.g. 
publish channel, join group, user mapping refresh), the handler's `context` is passed into `_refresh_group_member_homes` and `refresh_home_tab_for_workspace`, so all `build_home_tab` calls in that request share the same request-scoped cache. Call sites updated in `channel_sync.py`, `group_manage.py`, `users.py`, `groups.py`, and `sync.py`. +- **Context isolation for cross-workspace refreshes** — When a change in one workspace triggers Home tab refreshes in other group members, `context=None` is passed to `refresh_home_tab_for_workspace` to prevent the acting workspace's request-scoped cache (bot token, admin IDs) from leaking into other workspaces' refresh paths. The acting workspace's own refresh still uses `context=context`. - **User Mapping Refresh** — Same pattern applied to the User Mapping screen: content hash, cached blocks, 60s cooldown with message, and `build_user_mapping_screen(..., context=..., return_blocks=True)` for caching. Request-scoped `get_workspace_by_id` used when building the screen. ### 45. Backup, Restore, and Data Migration (Completed) @@ -423,6 +423,20 @@ This document outlines the improvements made to the SyncBot application and addi - **Workspace migration export/import** — Export produces workspace-scoped JSON (syncs, sync channels, post meta, user directory, user mappings) with optional `source_instance` (webhook_url, instance_id, public_key, one-time connection code). Ed25519 signature for tampering detection. Import verifies signature, resolves or creates federated group (using `source_instance` when present), replace mode (remove then create SyncChannels/PostMeta/user_directory/user_mappings), optional tampering confirmation; Home tab and sync-list caches invalidated after import. - **Instance A detection** — Federated pair request accepts optional `team_id` and `workspace_name`; stored as `primary_team_id` and `primary_workspace_name` on `federated_workspaces`. 
If a local workspace with that `team_id` exists, it is soft-deleted so the federated connection is the only representation of that workspace on the instance. +### 46. Code Quality & Documentation Restructure (Completed) +- **Database reset via UI** — Renamed `DANGER_DROP_AND_INIT_DB` (auto-drop on startup) to `ENABLE_DB_RESET` (boolean env var). When enabled, a red "Reset Database" button appears in a "Danger Zone" section at the bottom of the Home tab. Clicking it opens a confirmation modal; confirming drops and reinitializes the database from `db/init.sql`, clears all caches, and publishes a confirmation message. No longer runs automatically on startup. +- **Variable naming convention audit** — Standardized variable names across 14 files to align with the domain model: + - `partner` / `p_ws` → `member_ws`; `p_ch` → `sync_channel`; `p_client` → `member_client` (maps to `workspace_group_members` table) + - `sc` (SyncChannel) → `sync_channel`; `ch` (ambiguous) → `sync_channel` or `slack_channel` depending on type + - `pm` → `post_meta` (PostMeta) or `pending_member` (WorkspaceGroupMember) to resolve ambiguity + - `fm` → `fed_member`; `pw` → `pending_ws` or `publisher_ws`; `och` → `other_channel` + - `m` in multi-line loops → `member`, `membership`, or `fed_member` as appropriate + - All log messages and comments updated to match +- **Naming convention established** — `_SCREAMING_CASE` for private module-level constants (true constants set once at import time); `_lowercase` for private functions, mutable state, and implementation-detail values; no-prefix `SCREAMING_CASE` for public constants +- **Cross-workspace context bug fix** — Fixed all handlers that were passing the acting workspace's `context` dict into other group members' Home tab refreshes. The `context` contains workspace-specific state (bot token, admin ID cache) that could contaminate other workspaces' builds. Now `context=None` for all cross-workspace refreshes. 
+- **README restructured** — Reduced README from ~580 lines to ~220 lines, keeping only install/deploy/run instructions. Moved end-user guide, backup/migration, CI/CD, shared infrastructure, and API reference into `docs/` folder (`USER_GUIDE.md`, `BACKUP_AND_MIGRATION.md`, `DEPLOYMENT.md`, `API_REFERENCE.md`). +- **Documentation consistency** — Updated `IMPROVEMENTS.md` and all doc files to use new domain terminology (group members instead of partners). + ## Remaining Recommendations ### Low Priority @@ -432,7 +446,7 @@ This document outlines the improvements made to the SyncBot application and addi - Review and update other dependencies 2. **Database Migrations** - - Consider adopting Alembic for formal migration management + - Currently using `db/init.sql` as the single source of truth (pre-release); consider adopting Alembic for formal migration management post-release 3. **Advanced Testing** - Add integration tests for database operations @@ -464,4 +478,6 @@ This document outlines the improvements made to the SyncBot application and addi - File downloads are streamed with timeouts and size caps to prevent DoS - Fernet key derivation is cached for performance; bot identity is resolved in a single API call - Duplicated code has been consolidated into shared helpers throughout handlers and federation modules -- Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching and context passing through push-refresh paths keep multi-workspace updates lightweight \ No newline at end of file +- Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching keeps builds lightweight, and cross-workspace refreshes use `context=None` to prevent cache contamination +- Variable naming follows a consistent domain-model convention: 
`member_ws`/`member_client` for group members, `sync_channel` for ORM records, `slack_channel` for raw API dicts +- Pre-release schema management uses `db/init.sql` as the single source of truth (no separate migration scripts) \ No newline at end of file diff --git a/README.md b/README.md index e411fed..6e5db8c 100644 --- a/README.md +++ b/README.md @@ -1,93 +1,13 @@ # SyncBot SyncBot Icon -SyncBot is a Slack app originally developed for the [F3 Community](https://github.com/F3Nation-Community/syncbot) and has been forked here for general use by other Slack Workspace admins. It is intended to provide a replication ("Sync") service for messages and replies across Slack Workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs are automatically mirrored to every channel in a Sync group. - -## End-User Quick Start - -1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) -2. Open the **SyncBot** app from the sidebar and click the **Home** tab (requires workspace admin or owner) -3. The Home tab shows everything in one view: - - **SyncBot Configuration (top row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) - - **Workspace Groups** — create or join groups of workspaces that can sync channels together - - **Per-group sections** — for each group you can publish channels, manage user mapping (dedicated Home tab screen), and see/manage channel syncs inline - - **Synced Channels** — each row shows the local channel and workspace list in brackets (e.g. 
_[Any: Your Workspace, Partner Workspace]_), with pause/resume and stop controls, synced-since date, and tracked message count - - **External Connections** *(when federation is enabled)* — Generate/Enter Connection Code and **Data Migration** (export workspace data for migration to another instance, or import a migration file) - -Things to know: - -- Only workspace **admins and owners** can configure syncs (set `REQUIRE_ADMIN=false` to allow all users) -- Messages, threads, edits, deletes, reactions, images, videos, and GIFs are all synced -- Messages from other bots are synced; only SyncBot's own messages are filtered to prevent loops -- Existing messages are not back-filled; syncing starts from the moment a channel is linked -- Do not add SyncBot manually to channels. SyncBot adds itself when you configure a Sync. If it detects it was added to an unconfigured channel it will post a message and leave automatically -- Both public and private channels are supported -- **Workspace Groups**: Workspaces must belong to the same **group** before they can sync channels or map users. Admins can create a new group (which generates an invite code) or join an existing group by entering a code. A workspace can be in multiple groups with different combinations of other workspaces -- **Sync Modes**: When publishing a channel inside a group, admins choose either **1-to-1** (only a specific workspace can subscribe) or **group-wide** (any group member can subscribe independently) -- **Pause/Resume**: Individual channel syncs can be paused and resumed without losing configuration. Paused channels do not sync any messages, threads, or reactions -- **Selective Stop**: When a workspace stops syncing a channel, only that workspace's history is removed. Other workspaces continue syncing uninterrupted. 
The published channel remains available until the original publisher unpublishes it -- **Uninstall/Reinstall**: If a workspace uninstalls SyncBot, group memberships and syncs are paused (not deleted). Reinstalling within the retention period (default 30 days, configurable via `SOFT_DELETE_RETENTION_DAYS`) automatically restores everything. Group members are notified via DMs and channel messages -- **User Mapping**: Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name -- **Refresh buttons**: The Home tab and User Mapping screens have Refresh buttons. To keep RDS and Slack API usage low, repeated clicks with no data changes are handled lightly: a 60-second cooldown applies, and when nothing has changed the app reuses cached content and shows "No new data. Wait __ seconds before refreshing again." when you click again too soon -- **Media Sync**: Images and videos are uploaded directly to target channels (or via S3 if configured). GIFs from the Slack GIF picker or GIPHY are synced as image blocks -- **External Connections** *(opt-in)*: Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. **Data Migration** in the same section lets you export your workspace data (syncs, channels, post meta, user directory, user mappings) for moving to another instance, or import a migration file after connecting. Disabled by default — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable -- **Backup/Restore**: Use **Backup/Restore** on the Home tab to download a full-instance backup (all tables as JSON) or restore from a backup file. 
Intended for disaster recovery (e.g. before rebuilding AWS). Backup includes an integrity check (HMAC); restore checks the encryption key hash — if it differs, bot tokens will not decrypt until workspaces re-authorize. Restore targets an empty or fresh database -- **Data Migration**: When federation is enabled, **Data Migration** opens a modal to export your workspace data (for moving that workspace to its own instance) or import a migration file. The export can include a one-time connection code so the new instance can connect to the old one in one step. Import uses replace mode (existing sync channels in the federated group are replaced). User mappings are carried over (same Slack workspace, so user IDs match). Exports are signed (Ed25519) for tampering detection; import still proceeds on mismatch but shows a warning +SyncBot is a Slack app originally developed for the [F3 Community](https://github.com/F3Nation-Community/syncbot) and has been forked here for general use by other Slack Workspace admins. It provides a replication ("Sync") service for messages and replies across Slack Workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs are automatically mirrored to every channel in a Sync group. ---- - -## Deploying to AWS - -SyncBot ships with a full AWS SAM template (`template.yaml`) that provisions everything on the **free tier**: - -| Resource | Service | Free-Tier Detail | -|----------|---------|-----------------| -| Compute | Lambda (128 MB) | 1M requests/month free | -| API | API Gateway v1 | 1M calls/month free | -| Database | RDS MySQL (db.t3.micro) | 750 hrs/month free (12 months) | -| Storage | S3 (3 buckets) | 5 GB free | - ---- - -## Architecture - -See [ARCHITECTURE.md](ARCHITECTURE.md) for message sync flow, AWS infrastructure, backup/restore and data migration flows, and performance/cost optimizations (including Refresh button behavior and request-scoped caching). 
- ---- - -## Backup, Restore, and Data Migration - -### Full-instance backup and restore - -Use **Backup/Restore** (Home tab, next to Refresh) to: - -- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `PASSWORD_ENCRYPT_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. -- **Restore from backup** — Paste the backup JSON in the modal and submit. Restore is intended for an **empty or fresh database** (e.g. after an AWS rebuild). If the encryption key hash or HMAC does not match, you will see a warning and can still proceed (e.g. if you edited the file on purpose). - -After restore, Home tab caches are cleared so the next Refresh shows current data. - -### Workspace data migration (federation) - -When **External Connections** is enabled, **Data Migration** (in that section) lets you: - -- **Export** — Download a workspace-scoped JSON file (syncs, sync channels, post meta, user directory, user mappings) plus an optional one-time connection code so the new instance can connect to the source in one step. The file is signed (Ed25519) for tampering detection. -- **Import** — Paste a migration file, then submit. If the file includes a connection payload and you are not yet connected, the app establishes the federation connection and creates the group, then imports. Existing sync channels for that workspace in the federated group are **replaced** (replace mode). User mappings are imported where both workspaces exist on the new instance. If the signature check fails, a warning is shown but you can still proceed. - -After import, Home tab and sync-list caches for that workspace are cleared. 
- -**Instance A behavior:** When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](ARCHITECTURE.md) for details. +> **New to SyncBot?** See the [User Guide](docs/USER_GUIDE.md) for a walkthrough of all features. --- -### Prerequisites - -| Tool | Version | Purpose | -|------|---------|---------| -| **AWS SAM CLI** | latest | Build & deploy Lambda + infra | -| **Docker** | latest | SAM uses a container to build the Lambda package | -| **MySQL client** *(optional)* | any | Run schema scripts against the DB | - -### Create a Slack app +## Create a Slack App Before deploying (or developing locally) you need a Slack app: @@ -106,7 +26,28 @@ Before deploying (or developing locally) you need a Slack app: > **Why do I need to install the app manually for local dev?** In production, SyncBot uses OAuth so each workspace gets its own token automatically. In local development mode, there's no OAuth flow — you connect to a single workspace using a bot token you copy from the Slack app settings. -### First-time deploy +--- + +## Deploying to AWS + +SyncBot ships with a full AWS SAM template (`template.yaml`) that provisions everything on the **free tier**: + +| Resource | Service | Free-Tier Detail | +|----------|---------|-----------------| +| Compute | Lambda (128 MB) | 1M requests/month free | +| API | API Gateway v1 | 1M calls/month free | +| Database | RDS MySQL (db.t3.micro) | 750 hrs/month free (12 months) | +| Storage | S3 (3 buckets) | 5 GB free | + +### Prerequisites + +| Tool | Version | Purpose | +|------|---------|---------| +| **AWS SAM CLI** | latest | Build & deploy Lambda + infra | +| **Docker** | latest | SAM uses a container to build the Lambda package | +| **MySQL client** *(optional)* | any | Run schema scripts against the DB | + +### First-Time Deploy 1. 
**Build** the Lambda package: @@ -130,36 +71,7 @@ mysql -h <host> -u <user> -p syncbot < db/init.sql 4. **Update your Slack app URLs** to point at the API Gateway endpoint shown in the stack outputs (e.g., `https://xxxxx.execute-api.us-east-2.amazonaws.com/Prod/slack/events`). -### Sharing infrastructure across apps -If you run multiple apps in the same AWS account, you can point SyncBot at existing resources instead of creating new ones. Every `Existing*` parameter defaults to empty (create new); set it to an existing resource name to reuse it. -| Parameter | What it skips | -|-----------|---------------| -| `ExistingDatabaseHost` | VPC, subnets, security groups, RDS instance | -| `ExistingSlackStateBucket` | Slack OAuth state S3 bucket | -| `ExistingInstallationBucket` | Slack installation data S3 bucket | -| `ExistingImagesBucket` | Synced-images S3 bucket | -Example — deploy with an existing RDS and images bucket: -```bash -sam deploy --guided \ - --parameter-overrides \ - ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com \ - ExistingImagesBucket=my-shared-images-bucket -``` -Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). Create the schema and initialize the tables on the existing instance: -```bash -mysql -h <host> -u <user> -p -e "CREATE DATABASE IF NOT EXISTS syncbot;" -mysql -h <host> -u <user> -p syncbot < db/init.sql -``` -**What about API Gateway and Lambda?** Each stack always creates its own API Gateway and Lambda function. These are lightweight resources that don't affect free-tier billing — the free tier quotas (1M API calls, 1M Lambda requests) are shared across your entire account regardless of how many gateways or functions you have. If you want a unified domain across apps, put a CloudFront distribution or API Gateway custom domain in front. 
- -### Subsequent deploys +### Subsequent Deploys ```bash sam build --use-container @@ -169,56 +81,7 @@ sam deploy --config-env prod # production profile The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. -### CI/CD via GitHub Actions - -Pushes to `main` automatically build and deploy via `.github/workflows/sam-pipeline.yml`: - -1. **Build** — `sam build --use-container` -2. **Deploy to test** — automatic -3. **Deploy to prod** — requires manual approval (configure in GitHub environment settings) - -#### One-time setup - -1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3, IAM, and RDS. Generate an access key pair. - -2. **Create a SAM deployment bucket** — SAM needs an S3 bucket to upload build artifacts during deploy: - -```bash -aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 -``` - -3. **Create GitHub Environments** — Go to your repo → **Settings** → **Environments** and create two environments: `test` and `prod`. For `prod`, enable **Required reviewers** so production deploys need manual approval. - -4. **Add GitHub Secrets** — Under **Settings** → **Secrets and variables** → **Actions**, add these as **environment secrets** for both `test` and `prod`: - -| Secret | Where to find it | -|--------|-----------------| -| `AWS_ACCESS_KEY_ID` | IAM user access key (step 1) | -| `AWS_SECRET_ACCESS_KEY` | IAM user secret key (step 1) | -| `SLACK_SIGNING_SECRET` | Slack app → Basic Information → App Credentials | -| `SLACK_CLIENT_SECRET` | Slack app → Basic Information → App Credentials | -| `DATABASE_PASSWORD` | The RDS master password you chose | -| `PASSWORD_ENCRYPT_KEY` | Any passphrase for bot-token encryption at rest | - -5. 
**Add GitHub Variables** — Under the same settings page, add these as **environment variables** for each environment: - -| Variable | `test` value | `prod` value | -|----------|-------------|-------------| -| `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | -| `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | -| `STAGE_NAME` | `staging` | `prod` | - -#### Deploy flow - -Once configured, merge or push to `main` and the pipeline runs: - -``` -push to main → sam build → deploy to test → (manual approval) → deploy to prod -``` - -Monitor progress in your repo's **Actions** tab. The first deploy creates the full CloudFormation stack (VPC, RDS, Lambda, API Gateway, S3 buckets). Subsequent deploys update only what changed. - -> **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. +> For shared infrastructure, CI/CD setup, and advanced deployment options, see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md). --- @@ -235,19 +98,14 @@ Opens the project inside a Docker container with full editor integration — Int ```bash git clone https://github.com/GITHUB_ORG_NAME/syncbot.git cd syncbot -``` - -Copy the example env file and fill in your bot token (from the [Create a Slack app](#create-a-slack-app) step above): - -```bash cp .env.example .env ``` -At minimum, set `SLACK_BOT_TOKEN` to the `xoxb-...` token you copied from **OAuth & Permissions** after installing the app to your workspace. +Set `SLACK_BOT_TOKEN` to the `xoxb-...` token you copied from **OAuth & Permissions** after installing the app. #### 2. 
Open in Dev Container -Open the project folder in your VSCodium-based editor, then: +Open the project folder in your editor, then: - Press `Cmd+Shift+P` → **Dev Containers: Reopen in Container** - Or click the green remote indicator in the bottom-left corner → **Reopen in Container** @@ -256,8 +114,6 @@ The first build takes a minute or two. After that, your editor is running inside #### 3. Run the app -Open the integrated terminal (it's already inside the container) and run: - ```bash cd syncbot && python app.py ``` @@ -266,7 +122,7 @@ The app starts on **port 3000** (auto-forwarded to your host). #### 4. Expose to Slack -In a **local** terminal (outside the container), start a tunnel using your favorite platform, for instance [Cloudflare Tunnel](https://developers.cloudflare.com/cloudflare-one/networks/connectors/cloudflare-tunnel/) or [ngrok](https://ngrok.com/docs/what-is-ngrok): +In a **local** terminal (outside the container), start a tunnel: ```bash cloudflared tunnel --url http://localhost:3000/ @@ -276,7 +132,7 @@ or ngrok http 3000 ``` -Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to point at the public URL. +Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to the public URL. #### 5. Run tests @@ -296,75 +152,30 @@ The database schema is initialized automatically on first run. To reset it, rebu ### Option B: Docker Compose (without Dev Container) -Runs everything in containers but you edit files on your host. Good if you don't want to use the Dev Container extension. +Runs everything in containers but you edit files on your host. **Prerequisites:** Docker Desktop -#### 1. Clone and configure - ```bash git clone https://github.com/GITHUB_ORG_NAME/syncbot.git cd syncbot -``` - -Create a `.env` file (same as Option A above — `cp .env.example .env` and set your `SLACK_BOT_TOKEN`). - -#### 2. 
Start the app - -```bash +cp .env.example .env # set SLACK_BOT_TOKEN docker compose up --build ``` -This starts both MySQL and the app. The database schema is initialized automatically on first run. The app listens on **port 3000**. - -To run in the background: - -```bash -docker compose up --build -d -docker compose logs -f app # follow app logs -``` - -Code changes require a restart (no rebuild — the code is mounted as a volume): - -```bash -docker compose restart app -``` - -Only rebuild when `requirements.txt` changes: - -```bash -docker compose up --build -``` - -#### 3. Run tests and other commands - -```bash -docker compose exec app python -m pytest /app/tests -v -docker compose exec db mysql -u root -prootpass syncbot -``` - -#### Resetting +The app listens on **port 3000**. Code changes require `docker compose restart app`. Only rebuild when `requirements.txt` changes. ```bash -docker compose down # stop everything -docker compose down -v # stop and delete the database volume +docker compose exec app python -m pytest /app/tests -v # run tests +docker compose exec db mysql -u root -prootpass syncbot # database shell +docker compose down -v # stop + delete DB volume ``` --- ### Option C: Native Python -Run the app directly on your machine with a local or containerized MySQL instance. - -**Prerequisites:** - -| Tool | Version | Purpose | -|------|---------|---------| -| **Python** | 3.11+ | Runtime | -| **Poetry** | 1.6+ | Dependency management | -| **Docker** *(optional)* | latest | Easiest way to run MySQL locally | - -#### 1. Clone and install dependencies +**Prerequisites:** Python 3.11+, Poetry 1.6+, Docker *(optional, for MySQL)* ```bash git clone https://github.com/GITHUB_ORG_NAME/syncbot.git @@ -372,9 +183,7 @@ cd syncbot poetry install --with dev ``` -#### 2. 
Set up a local MySQL database - -Run a MySQL 8 instance (Docker is easiest): +Start a local MySQL instance: ```bash docker run -d --name syncbot-db \ @@ -382,56 +191,26 @@ docker run -d --name syncbot-db \ -e MYSQL_DATABASE=syncbot \ -p 3306:3306 \ mysql:8 -``` - -Initialize the schema: - -```bash mysql -h 127.0.0.1 -u root -prootpass syncbot < db/init.sql ``` -#### 3. Configure environment variables - -Copy the example env file and fill in your bot token (from the [Create a Slack app](#create-a-slack-app) step): +Configure and run: ```bash -cp .env.example .env +cp .env.example .env # set SLACK_BOT_TOKEN + verify DATABASE_HOST=127.0.0.1 source .env -``` - -At minimum, set `SLACK_BOT_TOKEN` to the `xoxb-...` token from **OAuth & Permissions**. For native Python, also verify the database values match your local MySQL (`DATABASE_HOST=127.0.0.1` by default). See `.env.example` for all available options. - -#### 4. Run the app - -```bash poetry run python syncbot/app.py ``` -The app starts a local Bolt server on **port 3000**. Use your favorite tunnel platform to expose it to Slack: - -```bash -cloudflared tunnel --url http://localhost:3000/ -``` -or -```bash -ngrok http 3000 -``` - -Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to the public URL. - -#### 5. Run tests - -```bash -poetry run pytest -v -``` - -All tests run against mocked dependencies — no database or Slack credentials needed. +The app starts on **port 3000**. Use a tunnel to expose it to Slack. Run tests with `poetry run pytest -v`. --- -## Environment Variables Reference +## Environment Variables + +See [`.env.example`](.env.example) for all available options with descriptions. 
-### Always required +### Always Required | Variable | Description | |----------|-------------| @@ -440,7 +219,7 @@ All tests run against mocked dependencies — no database or Slack credentials n | `ADMIN_DATABASE_PASSWORD` | MySQL password | | `ADMIN_DATABASE_SCHEMA` | MySQL database name | -### Required in production (Lambda) +### Required in Production (Lambda) | Variable | Description | |----------|-------------| @@ -452,62 +231,37 @@ All tests run against mocked dependencies — no database or Slack credentials n | `ENV_SLACK_INSTALLATION_S3_BUCKET_NAME` | S3 bucket for installations | | `PASSWORD_ENCRYPT_KEY` | Passphrase for Fernet bot-token encryption | -### Local development only +### Local Development Only | Variable | Description | |----------|-------------| -| `SLACK_BOT_TOKEN` | Bot token (presence triggers local-dev mode) | -| `AWS_ACCESS_KEY_ID` | For S3 uploads during local dev | -| `AWS_SECRET_ACCESS_KEY` | For S3 uploads during local dev | +| `SLACK_BOT_TOKEN` | Bot token (`xoxb-...`) — presence triggers local-dev mode | +| `LOCAL_DEVELOPMENT` | Set to `true` to skip token verification and use readable logs | ### Optional | Variable | Default | Description | |----------|---------|-------------| -| `LOCAL_DEVELOPMENT` | `false` | Set to `true` to skip Slack token verification at startup and use human-readable log output instead of JSON. | -| `REQUIRE_ADMIN` | `true` | When `true`, only workspace admins/owners can configure syncs. Set to `false` to allow all users. | -| `S3_IMAGE_BUCKET` | *(empty)* | S3 bucket name for synced images. When empty, images are uploaded directly to Slack via `files_upload_v2`. | -| `S3_IMAGE_URL` | *(auto from bucket)* | Public URL prefix for S3 images (e.g., `https://mybucket.s3.amazonaws.com/`). Auto-generated from `S3_IMAGE_BUCKET` if not set. | -| `S3_VIDEO_ENABLED` | `false` | When `true` and `S3_IMAGE_BUCKET` is set, videos are also stored in S3. 
When `false`, videos are uploaded directly to Slack regardless of S3 configuration. | -| `SOFT_DELETE_RETENTION_DAYS` | `30` | Days to keep soft-deleted workspace data before permanent purge. When a workspace uninstalls, its group memberships and syncs are paused; reinstalling within this window restores everything. | -| `SYNCBOT_FEDERATION_ENABLED` | `false` | Set to `true` to enable the External Connections feature (cross-instance sync with other SyncBot deployments). | -| `SYNCBOT_INSTANCE_ID` | *(auto-generated)* | Unique UUID for this SyncBot instance. Auto-generated on first run if not set. Used by external connections. | -| `SYNCBOT_PUBLIC_URL` | *(none)* | Publicly reachable base URL of this instance (e.g., `https://syncbot.example.com`). Required when external connections are enabled. | +| `REQUIRE_ADMIN` | `true` | Only admins/owners can configure syncs | +| `S3_IMAGE_BUCKET` | *(empty)* | S3 bucket for synced images | +| `S3_VIDEO_ENABLED` | `false` | Store videos in S3 (when bucket is set) | +| `SOFT_DELETE_RETENTION_DAYS` | `30` | Days before soft-deleted data is purged | +| `SYNCBOT_FEDERATION_ENABLED` | `false` | Enable External Connections | +| `SYNCBOT_PUBLIC_URL` | *(none)* | Public URL for external connections | +| `ENABLE_DB_RESET` | `false` | Show a "Reset Database" button on the Home tab | --- -## API Endpoints and Slack Commands - -### HTTP Endpoints (API Gateway) - -All endpoints are served by a single Lambda function. Slack sends requests to the `/slack/*` URLs after you configure the app. The `/api/federation/*` endpoints handle cross-instance communication for external connections. 
- -| Method | Path | Purpose | -|--------|------|---------| -| `POST` | `/slack/events` | Receives all Slack events (messages, actions, view submissions) and slash commands | -| `GET` | `/slack/install` | OAuth install page — redirects the user to Slack's authorization screen | -| `GET` | `/slack/oauth_redirect` | OAuth callback — Slack redirects here after the user approves the app | -| `POST` | `/api/federation/pair` | Accept an incoming external connection request | -| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance | -| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance | -| `POST` | `/api/federation/message/delete` | Receive a message deletion from a connected instance | -| `POST` | `/api/federation/message/react` | Receive a reaction from a connected instance | -| `POST` | `/api/federation/users` | Exchange user directory with a connected instance | -| `GET` | `/api/federation/ping` | Health check for connected instances | - -### Subscribed Slack Events - -| Event | Handler | Description | -|-------|---------|-------------| -| `app_home_opened` | `handle_app_home_opened` | Publishes the Home tab with workspace groups, channel syncs, and user matching. | -| `member_joined_channel` | `handle_member_joined_channel` | Detects when SyncBot is added to an unconfigured channel; posts a message and leaves. | -| `message.channels` / `message.groups` | `respond_to_message_event` | Fires on new messages, edits, deletes, and file shares in public/private channels. Dispatches to sub-handlers for new posts, thread replies, edits, deletes, and reactions. | -| `reaction_added` / `reaction_removed` | `_handle_reaction` | Syncs emoji reactions to the corresponding message in all target channels. | -| `team_join` | `handle_team_join` | Fires when a new user joins a connected workspace. Adds the user to the directory and re-checks unmatched user mappings. 
| -| `tokens_revoked` | `handle_tokens_revoked` | Handles workspace uninstall — soft-deletes workspace data and notifies group members. | -| `user_profile_changed` | `handle_user_profile_changed` | Detects display name or email changes and updates the user directory and mappings. | +## Further Reading ---- +| Document | Description | +|----------|-------------| +| [User Guide](docs/USER_GUIDE.md) | End-user walkthrough of all features | +| [Architecture](ARCHITECTURE.md) | Message sync flow, AWS infrastructure, caching | +| [Backup & Migration](docs/BACKUP_AND_MIGRATION.md) | Full-instance backup/restore, workspace data migration | +| [Deployment](docs/DEPLOYMENT.md) | Shared infrastructure, CI/CD via GitHub Actions | +| [API Reference](docs/API_REFERENCE.md) | HTTP endpoints and subscribed Slack events | +| [Improvements](IMPROVEMENTS.md) | Completed and planned improvements | ## Project Structure @@ -517,62 +271,20 @@ syncbot/ │ ├── app.py # Entry point — Slack Bolt app + Lambda handler │ ├── constants.py # Env-var names, startup validation │ ├── routing.py # Event/action → handler dispatcher -│ ├── logger.py # Structured JSON logging, correlation IDs, metrics -│ ├── requirements.txt # Pinned runtime dependencies (used by SAM build) │ ├── builders/ # Slack UI construction (Home tab, modals, forms) -│ │ ├── home.py # App Home tab builder -│ │ ├── channel_sync.py # Publish/subscribe channel sync UI -│ │ ├── user_mapping.py # User mapping Home tab screen & edit modal -│ │ └── sync.py # Sync detail views │ ├── handlers/ # Slack event & action handlers -│ │ ├── messages.py # Message sync — posts, threads, edits, deletes, reactions -│ │ ├── groups.py # Group lifecycle — create, join, accept, cancel -│ │ ├── group_manage.py # Leave group with confirmation -│ │ ├── channel_sync.py # Publish, unpublish, subscribe, pause, resume, stop -│ │ ├── users.py # team_join, profile changes, user mapping edits -│ │ ├── tokens.py # Uninstall / tokens_revoked handler -│ │ ├── 
federation_cmds.py # Federation UI actions (generate/enter/remove codes) -│ │ ├── sync.py # Sync join/remove handlers -│ │ └── _common.py # Shared handler utilities (EventContext, sanitize, metadata) │ ├── helpers/ # Business logic, Slack API wrappers, utilities -│ │ ├── core.py # safe_get, request classification, admin checks -│ │ ├── slack_api.py # Slack API helpers (retry, bot identity, user info) -│ │ ├── encryption.py # Fernet bot-token encryption (cached PBKDF2) -│ │ ├── files.py # File download/upload (streaming, S3, size caps) -│ │ ├── notifications.py # Admin DMs, channel notifications -│ │ ├── user_matching.py # Cross-workspace user matching & mention resolution -│ │ ├── workspace.py # Workspace record helpers, group lookups -│ │ ├── oauth.py # OAuth install/redirect helpers -│ │ └── _cache.py # Simple in-process TTL cache │ ├── federation/ # Cross-instance sync (opt-in) -│ │ ├── core.py # HMAC signing, HTTP client, payload builders -│ │ └── api.py # Federation API endpoint handlers -│ ├── db/ -│ │ ├── __init__.py # Engine, session, DbManager (pooling + retry) -│ │ └── schemas.py # SQLAlchemy ORM models -│ └── slack/ -│ ├── actions.py # Action/callback ID constants -│ ├── forms.py # Form definitions -│ ├── blocks.py # Block Kit shorthand helpers -│ └── orm.py # Block Kit ORM (BlockView, SectionBlock, etc.) 
-├── db/ -│ └── init.sql # Complete database schema (pre-release: single source) -├── tests/ # pytest unit tests (60 tests) -├── .devcontainer/ # Dev Container config (Cursor/VS Code) -├── Dockerfile # App container for local development -├── docker-compose.yml # Full local stack (app + MySQL) +│ ├── db/ # Engine, session, ORM models +│ └── slack/ # Action IDs, forms, Block Kit helpers +├── db/init.sql # Database schema +├── tests/ # pytest unit tests +├── docs/ # Extended documentation ├── template.yaml # AWS SAM infrastructure-as-code -├── samconfig.toml # SAM CLI deploy profiles (staging / prod) -├── slack-manifest.yaml # Slack app manifest (paste into api.slack.com) -├── pyproject.toml # Poetry project config + ruff linter settings -└── .github/workflows/ - └── sam-pipeline.yml # CI/CD: build → deploy staging → deploy prod +├── slack-manifest.yaml # Slack app manifest +└── docker-compose.yml # Local development stack ``` -## Improvements and Roadmap - -See [IMPROVEMENTS.md](IMPROVEMENTS.md) for a detailed list of completed and planned improvements. - ## License This project is licensed under **AGPL-3.0**, which means you can use and modify it, just keep it open and shareable. See [LICENSE](LICENSE) for details. diff --git a/docs/API_REFERENCE.md b/docs/API_REFERENCE.md new file mode 100644 index 0000000..58a0221 --- /dev/null +++ b/docs/API_REFERENCE.md @@ -0,0 +1,30 @@ +# API Reference + +## HTTP Endpoints (API Gateway) + +All endpoints are served by a single Lambda function. Slack sends requests to the `/slack/*` URLs after you configure the app. The `/api/federation/*` endpoints handle cross-instance communication for external connections. 
+ +| Method | Path | Purpose | +|--------|------|---------| +| `POST` | `/slack/events` | Receives all Slack events (messages, actions, view submissions) and slash commands | +| `GET` | `/slack/install` | OAuth install page — redirects the user to Slack's authorization screen | +| `GET` | `/slack/oauth_redirect` | OAuth callback — Slack redirects here after the user approves the app | +| `POST` | `/api/federation/pair` | Accept an incoming external connection request | +| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance | +| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance | +| `POST` | `/api/federation/message/delete` | Receive a message deletion from a connected instance | +| `POST` | `/api/federation/message/react` | Receive a reaction from a connected instance | +| `POST` | `/api/federation/users` | Exchange user directory with a connected instance | +| `GET` | `/api/federation/ping` | Health check for connected instances | + +## Subscribed Slack Events + +| Event | Handler | Description | +|-------|---------|-------------| +| `app_home_opened` | `handle_app_home_opened` | Publishes the Home tab with workspace groups, channel syncs, and user matching. | +| `member_joined_channel` | `handle_member_joined_channel` | Detects when SyncBot is added to an unconfigured channel; posts a message and leaves. | +| `message.channels` / `message.groups` | `respond_to_message_event` | Fires on new messages, edits, deletes, and file shares in public/private channels. Dispatches to sub-handlers for new posts, thread replies, edits, deletes, and reactions. | +| `reaction_added` / `reaction_removed` | `_handle_reaction` | Syncs emoji reactions to the corresponding message in all target channels. | +| `team_join` | `handle_team_join` | Fires when a new user joins a connected workspace. Adds the user to the directory and re-checks unmatched user mappings. 
| +| `tokens_revoked` | `handle_tokens_revoked` | Handles workspace uninstall — soft-deletes workspace data and notifies group members. | +| `user_profile_changed` | `handle_user_profile_changed` | Detects display name or email changes and updates the user directory and mappings. | diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md new file mode 100644 index 0000000..e5235f3 --- /dev/null +++ b/docs/BACKUP_AND_MIGRATION.md @@ -0,0 +1,23 @@ +# Backup, Restore, and Data Migration + +## Full-Instance Backup and Restore + +Use **Backup/Restore** (Home tab, next to Refresh) to: + +- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `PASSWORD_ENCRYPT_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. +- **Restore from backup** — Paste the backup JSON in the modal and submit. Restore is intended for an **empty or fresh database** (e.g. after an AWS rebuild). If the encryption key hash or HMAC does not match, you will see a warning and can still proceed (e.g. if you edited the file on purpose). + +After restore, Home tab caches are cleared so the next Refresh shows current data. + +## Workspace Data Migration (Federation) + +When **External Connections** is enabled, **Data Migration** (in that section) lets you: + +- **Export** — Download a workspace-scoped JSON file (syncs, sync channels, post meta, user directory, user mappings) plus an optional one-time connection code so the new instance can connect to the source in one step. The file is signed (Ed25519) for tampering detection. +- **Import** — Paste a migration file, then submit. 
If the file includes a connection payload and you are not yet connected, the app establishes the federation connection and creates the group, then imports. Existing sync channels for that workspace in the federated group are **replaced** (replace mode). User mappings are imported where both workspaces exist on the new instance. If the signature check fails, a warning is shown but you can still proceed. + +After import, Home tab and sync-list caches for that workspace are cleared. + +### Instance A Behavior + +When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](../ARCHITECTURE.md) for details. diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md new file mode 100644 index 0000000..72f8d61 --- /dev/null +++ b/docs/DEPLOYMENT.md @@ -0,0 +1,81 @@ +# Deployment Guide + +## Sharing Infrastructure Across Apps + +If you run multiple apps in the same AWS account, you can point SyncBot at existing resources instead of creating new ones. Every `Existing*` parameter defaults to empty (create new); set it to an existing resource name to reuse it. + +| Parameter | What it skips | +|-----------|---------------| +| `ExistingDatabaseHost` | VPC, subnets, security groups, RDS instance | +| `ExistingSlackStateBucket` | Slack OAuth state S3 bucket | +| `ExistingInstallationBucket` | Slack installation data S3 bucket | +| `ExistingImagesBucket` | Synced-images S3 bucket | + +Example — deploy with an existing RDS and images bucket: + +```bash +sam deploy --guided \ + --parameter-overrides \ + ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com \ + ExistingImagesBucket=my-shared-images-bucket +``` + +Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). 
Create the schema and initialize the tables on the existing instance: + +```bash +mysql -h <host> -u <user> -p -e "CREATE DATABASE IF NOT EXISTS syncbot;" +mysql -h <host> -u <user> -p syncbot < db/init.sql +``` + +**What about API Gateway and Lambda?** Each stack always creates its own API Gateway and Lambda function. These are lightweight resources that don't affect free-tier billing — the free tier quotas (1M API calls, 1M Lambda requests) are shared across your entire account regardless of how many gateways or functions you have. If you want a unified domain across apps, put a CloudFront distribution or API Gateway custom domain in front. + +## CI/CD via GitHub Actions + +Pushes to `main` automatically build and deploy via `.github/workflows/sam-pipeline.yml`: + +1. **Build** — `sam build --use-container` +2. **Deploy to test** — automatic +3. **Deploy to prod** — requires manual approval (configure in GitHub environment settings) + +### One-Time Setup + +1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3, IAM, and RDS. Generate an access key pair. + +2. **Create a SAM deployment bucket** — SAM needs an S3 bucket to upload build artifacts during deploy: + +```bash +aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 +``` + +3. **Create GitHub Environments** — Go to your repo → **Settings** → **Environments** and create two environments: `test` and `prod`. For `prod`, enable **Required reviewers** so production deploys need manual approval. + +4. 
**Add GitHub Secrets** — Under **Settings** → **Secrets and variables** → **Actions**, add these as **environment secrets** for both `test` and `prod`: + +| Secret | Where to find it | +|--------|-----------------| +| `AWS_ACCESS_KEY_ID` | IAM user access key (step 1) | +| `AWS_SECRET_ACCESS_KEY` | IAM user secret key (step 1) | +| `SLACK_SIGNING_SECRET` | Slack app → Basic Information → App Credentials | +| `SLACK_CLIENT_SECRET` | Slack app → Basic Information → App Credentials | +| `DATABASE_PASSWORD` | The RDS master password you chose | +| `PASSWORD_ENCRYPT_KEY` | Any passphrase for bot-token encryption at rest | + +5. **Add GitHub Variables** — Under the same settings page, add these as **environment variables** for each environment: + +| Variable | `test` value | `prod` value | +|----------|-------------|-------------| +| `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | +| `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | +| `STAGE_NAME` | `staging` | `prod` | + +### Deploy Flow + +Once configured, merge or push to `main` and the pipeline runs: + +``` +push to main → sam build → deploy to test → (manual approval) → deploy to prod +``` + +Monitor progress in your repo's **Actions** tab. The first deploy creates the full CloudFormation stack (VPC, RDS, Lambda, API Gateway, S3 buckets). Subsequent deploys update only what changed. + +> **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md new file mode 100644 index 0000000..b5add34 --- /dev/null +++ b/docs/USER_GUIDE.md @@ -0,0 +1,62 @@ +# SyncBot User Guide + +## Getting Started + +1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) +2. Open the **SyncBot** app from the sidebar and click the **Home** tab (requires workspace admin or owner) +3. 
The Home tab shows everything in one view: + - **SyncBot Configuration (top row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) + - **Workspace Groups** — create or join groups of workspaces that can sync channels together + - **Per-group sections** — for each group you can publish channels, manage user mapping (dedicated Home tab screen), and see/manage channel syncs inline + - **Synced Channels** — each row shows the local channel and workspace list in brackets (e.g. _[Any: Your Workspace, Other Workspace]_), with pause/resume and stop controls, synced-since date, and tracked message count + - **External Connections** *(when federation is enabled)* — Generate/Enter Connection Code and **Data Migration** (export workspace data for migration to another instance, or import a migration file) + +## Things to Know + +- Only workspace **admins and owners** can configure syncs (set `REQUIRE_ADMIN=false` to allow all users) +- Messages, threads, edits, deletes, reactions, images, videos, and GIFs are all synced +- Messages from other bots are synced; only SyncBot's own messages are filtered to prevent loops +- Existing messages are not back-filled; syncing starts from the moment a channel is linked +- Do not add SyncBot manually to channels. SyncBot adds itself when you configure a Sync. If it detects it was added to an unconfigured channel it will post a message and leave automatically +- Both public and private channels are supported + +## Workspace Groups + +Workspaces must belong to the same **group** before they can sync channels or map users. Admins can create a new group (which generates an invite code) or join an existing group by entering a code. A workspace can be in multiple groups with different combinations of other workspaces. 
+ +## Sync Modes + +When publishing a channel inside a group, admins choose either **1-to-1** (only a specific workspace can subscribe) or **group-wide** (any group member can subscribe independently). + +## Pause / Resume / Stop + +- **Pause/Resume** — Individual channel syncs can be paused and resumed without losing configuration. Paused channels do not sync any messages, threads, or reactions. +- **Selective Stop** — When a workspace stops syncing a channel, only that workspace's history is removed. Other workspaces continue syncing uninterrupted. The published channel remains available until the original publisher unpublishes it. + +## Uninstall / Reinstall + +If a workspace uninstalls SyncBot, group memberships and syncs are paused (not deleted). Reinstalling within the retention period (default 30 days, configurable via `SOFT_DELETE_RETENTION_DAYS`) automatically restores everything. Group members are notified via DMs and channel messages. + +## User Mapping + +Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name. + +## Refresh Behavior + +The Home tab and User Mapping screens have Refresh buttons. To keep API usage low, repeated clicks with no data changes are handled lightly: a 60-second cooldown applies, and when nothing has changed the app reuses cached content and shows "No new data. Wait __ seconds before refreshing again." + +## Media Sync + +Images and videos are uploaded directly to target channels (or via S3 if configured). GIFs from the Slack GIF picker or GIPHY are synced as image blocks. + +## External Connections + +*(Opt-in — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable)* + +Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. 
One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. + +**Data Migration** in the same section lets you export your workspace data (syncs, channels, post meta, user directory, user mappings) for moving to another instance, or import a migration file after connecting. See [Backup and Migration](BACKUP_AND_MIGRATION.md) for details. + +## Backup / Restore + +Use **Backup/Restore** on the Home tab to download a full-instance backup (all tables as JSON) or restore from a backup file. Intended for disaster recovery (e.g. before rebuilding AWS). See [Backup and Migration](BACKUP_AND_MIGRATION.md) for details. diff --git a/syncbot/app.py b/syncbot/app.py index 69007be..8f5c817 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -26,7 +26,6 @@ from slack_bolt.adapter.aws_lambda import SlackRequestHandler from constants import ( - DANGER_DROP_AND_INIT_DB, FEDERATION_ENABLED, HAS_REAL_BOT_TOKEN, LOCAL_DEVELOPMENT, @@ -69,10 +68,6 @@ def _redact_sensitive(obj, _depth=0): SlackRequestHandler.clear_all_log_handlers() configure_logging() -if os.environ.get(DANGER_DROP_AND_INIT_DB, "").strip().lower() == "true": - from db import drop_and_init_db - drop_and_init_db() - validate_config() app = App( diff --git a/syncbot/builders/_common.py b/syncbot/builders/_common.py index a871bfa..6b125d9 100644 --- a/syncbot/builders/_common.py +++ b/syncbot/builders/_common.py @@ -70,13 +70,13 @@ def _get_groups_for_workspace(workspace_id: int) -> list[tuple[WorkspaceGroup, W ], ) results: list[tuple[WorkspaceGroup, WorkspaceGroupMember]] = [] - for m in members: + for member in members: groups = DbManager.find_records( WorkspaceGroup, - [WorkspaceGroup.id == m.group_id, WorkspaceGroup.status == "active"], + [WorkspaceGroup.id == member.group_id, WorkspaceGroup.status == "active"], ) if groups: - results.append((groups[0], m)) + results.append((groups[0], member)) return results 
diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py index 4ba44f5..c545c7f 100644 --- a/syncbot/builders/channel_sync.py +++ b/syncbot/builders/channel_sync.py @@ -78,9 +78,9 @@ def _build_inline_channel_sync( # Workspace names for bracket: local first, then others local_name = helpers.resolve_workspace_name(workspace_record) or f"Workspace {workspace_record.id}" other_names: list[str] = [] - for och in other_chs: - och_ws = helpers.get_workspace_by_id(och.workspace_id, context=context) - other_names.append(helpers.resolve_workspace_name(och_ws) if och_ws else f"Workspace {och.workspace_id}") + for other_channel in other_chs: + other_ws = helpers.get_workspace_by_id(other_channel.workspace_id, context=context) + other_names.append(helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}") all_ws_names = [local_name] + other_names if sync.sync_mode == "direct": @@ -156,9 +156,9 @@ def _build_inline_channel_sync( publisher_ws = helpers.get_workspace_by_id(other_chs[0].workspace_id, context=context) if other_chs else None publisher_name = helpers.resolve_workspace_name(publisher_ws) if publisher_ws else "another workspace" sub_names_avail: list[str] = [] - for och in other_chs: - och_ws = helpers.get_workspace_by_id(och.workspace_id, context=context) - sub_names_avail.append(helpers.resolve_workspace_name(och_ws) if och_ws else f"Workspace {och.workspace_id}") + for other_channel in other_chs: + other_ws = helpers.get_workspace_by_id(other_channel.workspace_id, context=context) + sub_names_avail.append(helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}") if sync.sync_mode == "direct": mode_tag = f" _[1-to-1: {sub_names_avail[0]}]_" if sub_names_avail else "" else: diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py index 4cb7ece..a393501 100644 --- a/syncbot/builders/home.py +++ b/syncbot/builders/home.py @@ -70,15 +70,15 @@ def 
_home_tab_content_hash(workspace_record: Workspace) -> str: ], ) channel_sig = tuple( - (sc.workspace_id, sc.channel_id, sc.status or "active") - for sc in sorted(channels, key=lambda c: (c.workspace_id, c.channel_id)) + (sync_channel.workspace_id, sync_channel.channel_id, sync_channel.status or "active") + for sync_channel in sorted(channels, key=lambda c: (c.workspace_id, c.channel_id)) ) sync_channel_tuples.append((sync.id, channel_sig)) sync_channel_tuples.sort(key=lambda x: x[0]) # Per-member channel_count and mapped_count (shown in group section) member_sigs: list[tuple] = [] - for m in members: - ws_id = m.workspace_id or 0 + for member in members: + ws_id = member.workspace_id or 0 ch_count = 0 if ws_id and sync_ids: ch_count = len( @@ -234,6 +234,23 @@ def build_home_tab( if constants.FEDERATION_ENABLED: _build_federation_section(blocks, workspace_record) + # ── Database Reset (dev tool) ───────────────────────────── + if constants.ENABLE_DB_RESET: + blocks.append(divider()) + blocks.append(section(":warning: *Danger Zone*")) + blocks.append(block_context("Reset the database to its initial state. 
_All data will be permanently lost._")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":bomb: Reset Database", + action=actions.CONFIG_DB_RESET, + style="danger", + ), + ] + ) + ) + block_dicts = orm.BlockView(blocks=blocks).as_form_field() if return_blocks: return block_dicts @@ -268,10 +285,10 @@ def _build_pending_invite_section( ], ) inviter_names = [] - for m in inviting_members: - if m.workspace_id: - ws = helpers.get_workspace_by_id(m.workspace_id, context=context) - inviter_names.append(helpers.resolve_workspace_name(ws) if ws else f"Workspace {m.workspace_id}") + for member in inviting_members: + if member.workspace_id: + ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + inviter_names.append(helpers.resolve_workspace_name(ws) if ws else f"Workspace {member.workspace_id}") from_label = f" from {', '.join(inviter_names)}" if inviter_names else "" @@ -310,7 +327,7 @@ def _build_group_section( blocks.append(divider()) all_members = _get_group_members(group.id) - other_members = [m for m in all_members if m.workspace_id != workspace_record.id] + other_members = [member for member in all_members if member.workspace_id != workspace_record.id] role_tag = " _(creator)_" if my_membership.role == "creator" else "" icon = ":link:" if len(other_members) > 0 else ":handshake:" @@ -321,21 +338,21 @@ def _build_group_section( syncs_for_group = DbManager.find_records(Sync, [Sync.group_id == group.id]) sync_ids = [s.id for s in syncs_for_group] - for m in all_members: - if m.workspace_id: - ws = helpers.get_workspace_by_id(m.workspace_id, context=context) - name = helpers.resolve_workspace_name(ws) if ws else f"Workspace {m.workspace_id}" - if m.workspace_id == workspace_record.id: + for member in all_members: + if member.workspace_id: + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + name = helpers.resolve_workspace_name(member_ws) if member_ws else f"Workspace {member.workspace_id}" + 
if member.workspace_id == workspace_record.id: name += " _(you)_" - elif m.federated_workspace_id: - fed_ws = DbManager.get_record(FederatedWorkspace, id=m.federated_workspace_id) + elif member.federated_workspace_id: + fed_ws = DbManager.get_record(FederatedWorkspace, id=member.federated_workspace_id) name = f":globe_with_meridians: {fed_ws.name}" if fed_ws and fed_ws.name else "External" else: name = "Unknown" - joined_str = f"{m.joined_at:%B %d, %Y}" if m.joined_at else "Unknown" + joined_str = f"{member.joined_at:%B %d, %Y}" if member.joined_at else "Unknown" - ws_id = m.workspace_id + ws_id = member.workspace_id channel_count = 0 if ws_id and sync_ids: channels = DbManager.find_records( @@ -375,10 +392,10 @@ def _build_group_section( WorkspaceGroupMember.deleted_at.is_(None), ], ) - for pm in pending_members: - if pm.workspace_id: - pw = helpers.get_workspace_by_id(pm.workspace_id, context=context) - pname = helpers.resolve_workspace_name(pw) if pw else f"Workspace {pm.workspace_id}" + for pending_member in pending_members: + if pending_member.workspace_id: + pending_ws = helpers.get_workspace_by_id(pending_member.workspace_id, context=context) + pname = helpers.resolve_workspace_name(pending_ws) if pending_ws else f"Workspace {pending_member.workspace_id}" else: pname = "Unknown" blocks.append(block_context(f":hourglass_flowing_sand: *{pname}* — _Pending invite_")) @@ -387,8 +404,8 @@ def _build_group_section( elements=[ orm.ButtonElement( label="Cancel Invite", - action=f"{actions.CONFIG_CANCEL_GROUP_REQUEST}_{pm.id}", - value=str(pm.id), + action=f"{actions.CONFIG_CANCEL_GROUP_REQUEST}_{pending_member.id}", + value=str(pending_member.id), ), ] ) @@ -463,16 +480,16 @@ def _build_federation_section( ) shown_fed: set[int] = set() - for fm in fed_members: - if not fm.federated_workspace_id or fm.federated_workspace_id in shown_fed: + for fed_member in fed_members: + if not fed_member.federated_workspace_id or fed_member.federated_workspace_id in shown_fed: 
continue my_groups = _get_groups_for_workspace(workspace_record.id) my_group_ids = {g.id for g, _ in my_groups} - if fm.group_id not in my_group_ids: + if fed_member.group_id not in my_group_ids: continue - shown_fed.add(fm.federated_workspace_id) - fed_ws = DbManager.get_record(FederatedWorkspace, id=fm.federated_workspace_id) + shown_fed.add(fed_member.federated_workspace_id) + fed_ws = DbManager.get_record(FederatedWorkspace, id=fed_member.federated_workspace_id) if not fed_ws: continue @@ -489,9 +506,9 @@ def _build_federation_section( elements=[ orm.ButtonElement( label="Remove Connection", - action=f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_{fm.id}", + action=f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_{fed_member.id}", style="danger", - value=str(fm.id), + value=str(fed_member.id), ), ] ) diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py index c387d3a..2df42b4 100644 --- a/syncbot/builders/user_mapping.py +++ b/syncbot/builders/user_mapping.py @@ -149,10 +149,10 @@ def _display_for_mapping(m: UserMapping, ws_lookup: dict[int, str]) -> str: _avatar_lookup: dict[tuple[int, str], str] = {} for source_ws_id in linked_workspace_ids: ws = helpers.get_workspace_by_id(source_ws_id, context=context) - partner_client = None + member_client = None if ws and ws.bot_token: with contextlib.suppress(Exception): - partner_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + member_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) dir_entries = DbManager.find_records( UserDirectory, [UserDirectory.workspace_id == source_ws_id, UserDirectory.deleted_at.is_(None)], @@ -160,9 +160,9 @@ def _display_for_mapping(m: UserMapping, ws_lookup: dict[int, str]) -> str: for entry in dir_entries: if entry.email: _email_lookup[(source_ws_id, entry.slack_user_id)] = entry.email - if partner_client: + if member_client: with contextlib.suppress(Exception): - _, avatar_url = helpers.get_user_info(partner_client, 
entry.slack_user_id) + _, avatar_url = helpers.get_user_info(member_client, entry.slack_user_id) if avatar_url: _avatar_lookup[(source_ws_id, entry.slack_user_id)] = avatar_url @@ -308,8 +308,8 @@ def build_user_mapping_edit_modal( avatar_accessory = None if source_ws and source_ws.bot_token: with contextlib.suppress(Exception): - partner_client = WebClient(token=helpers.decrypt_bot_token(source_ws.bot_token)) - _, avatar_url = helpers.get_user_info(partner_client, mapping.source_user_id) + member_client = WebClient(token=helpers.decrypt_bot_token(source_ws.bot_token)) + _, avatar_url = helpers.get_user_info(member_client, mapping.source_user_id) if avatar_url: avatar_accessory = orm.ImageAccessoryElement(image_url=avatar_url, alt_text=display) diff --git a/syncbot/constants.py b/syncbot/constants.py index d11014f..4cfb64f 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -33,8 +33,8 @@ ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" -# When set to "true", app startup drops the database and reinitializes from db/init.sql. All data is lost. -DANGER_DROP_AND_INIT_DB = "DANGER_DROP_AND_INIT_DB" +# When set to "true", a red "Reset Database" button appears on the Home tab. +ENABLE_DB_RESET = os.environ.get("ENABLE_DB_RESET", "false").lower() == "true" LOCAL_DEVELOPMENT = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 1ebe94e..7173023 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -64,13 +64,13 @@ def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: def drop_and_init_db() -> None: """Drop the database and reinitialize from db/init.sql. All data is lost. - Only run when DANGER_DROP_AND_INIT_DB is set to true. Caller must check. + Called from the "Reset Database" UI button (gated by ENABLE_DB_RESET). Resets GLOBAL_ENGINE and GLOBAL_SESSION so the next get_engine() uses a fresh DB. 
""" global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA - _logger.warning( - "DANGER_DROP_AND_INIT_DB is set: dropping database and reinitializing from init.sql. All data will be lost." + _logger.critical( + "DB RESET: dropping database and reinitializing from init.sql. All data will be lost." ) schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py index cb43fba..8ab406e 100644 --- a/syncbot/federation/api.py +++ b/syncbot/federation/api.py @@ -169,15 +169,15 @@ def _resolve_channel_for_federated( if not records: return None - sc = records[0] - if not _federated_has_channel_access(fed_ws, sc): + sync_channel = records[0] + if not _federated_has_channel_access(fed_ws, sync_channel): return None - workspace = helpers.get_workspace_by_id(sc.workspace_id) + workspace = helpers.get_workspace_by_id(sync_channel.workspace_id) if not workspace or not workspace.bot_token: return None - return sc, workspace + return sync_channel, workspace def _get_local_workspace_ids(fed_ws: schemas.FederatedWorkspace) -> set[int]: @@ -191,11 +191,11 @@ def _get_local_workspace_ids(fed_ws: schemas.FederatedWorkspace) -> set[int]: ], ) ws_ids: set[int] = set() - for fm in fed_members: + for fed_member in fed_members: group_members = DbManager.find_records( schemas.WorkspaceGroupMember, [ - schemas.WorkspaceGroupMember.group_id == fm.group_id, + schemas.WorkspaceGroupMember.group_id == fed_member.group_id, schemas.WorkspaceGroupMember.workspace_id.isnot(None), schemas.WorkspaceGroupMember.status == "active", schemas.WorkspaceGroupMember.deleted_at.is_(None), @@ -345,7 +345,7 @@ def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, resolved = _resolve_channel_for_federated(channel_id, fed_ws, require_active=True) if not resolved: return _NOT_FOUND - sc, workspace = resolved + sync_channel, workspace = resolved user_name = user.get("display_name", "Remote User") user_avatar = 
user.get("avatar_url") @@ -358,7 +358,7 @@ def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, schemas.PostMeta, [ schemas.PostMeta.post_id == thread_post_id, - schemas.PostMeta.sync_channel_id == sc.id, + schemas.PostMeta.sync_channel_id == sync_channel.id, ], ) if post_records: @@ -389,12 +389,12 @@ def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, ts = helpers.safe_get(res, "ts") if post_id and ts: - pm = schemas.PostMeta( + post_meta = schemas.PostMeta( post_id=post_id if isinstance(post_id, bytes) else post_id.encode()[:100], - sync_channel_id=sc.id, + sync_channel_id=sync_channel.id, ts=float(ts), ) - DbManager.create_record(pm) + DbManager.create_record(post_meta) _logger.info( "federation_message_received", @@ -426,18 +426,18 @@ def handle_message_edit(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple resolved = _resolve_channel_for_federated(channel_id, fed_ws) if not resolved: return _NOT_FOUND - sc, workspace = resolved + sync_channel, workspace = resolved - post_records = _find_post_records(post_id, sc.id) + post_records = _find_post_records(post_id, sync_channel.id) updated = 0 ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) - for pm in post_records: + for post_meta in post_records: try: - ws_client.chat_update(channel=channel_id, ts=str(pm.ts), text=text) + ws_client.chat_update(channel=channel_id, ts=str(post_meta.ts), text=text) updated += 1 except Exception: - _logger.warning("federation_edit_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + _logger.warning("federation_edit_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) return 200, {"ok": True, "updated": updated} @@ -459,18 +459,18 @@ def handle_message_delete(body: dict, fed_ws: schemas.FederatedWorkspace) -> tup resolved = _resolve_channel_for_federated(channel_id, fed_ws) if not resolved: return _NOT_FOUND - sc, workspace = resolved + sync_channel, workspace = 
resolved - post_records = _find_post_records(post_id, sc.id) + post_records = _find_post_records(post_id, sync_channel.id) deleted = 0 ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) - for pm in post_records: + for post_meta in post_records: try: - ws_client.chat_delete(channel=channel_id, ts=str(pm.ts)) + ws_client.chat_delete(channel=channel_id, ts=str(post_meta.ts)) deleted += 1 except Exception: - _logger.warning("federation_delete_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + _logger.warning("federation_delete_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) return 200, {"ok": True, "deleted": deleted} @@ -494,21 +494,21 @@ def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> tupl resolved = _resolve_channel_for_federated(channel_id, fed_ws) if not resolved: return _NOT_FOUND - sc, workspace = resolved + sync_channel, workspace = resolved - post_records = _find_post_records(post_id, sc.id) + post_records = _find_post_records(post_id, sync_channel.id) applied = 0 ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) - for pm in post_records: + for post_meta in post_records: try: if action == "add": - ws_client.reactions_add(channel=channel_id, timestamp=str(pm.ts), name=reaction) + ws_client.reactions_add(channel=channel_id, timestamp=str(post_meta.ts), name=reaction) else: - ws_client.reactions_remove(channel=channel_id, timestamp=str(pm.ts), name=reaction) + ws_client.reactions_remove(channel=channel_id, timestamp=str(post_meta.ts), name=reaction) applied += 1 except Exception: - _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(pm.ts)}) + _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) return 200, {"ok": True, "applied": applied} diff --git a/syncbot/handlers/__init__.py b/syncbot/handlers/__init__.py index 6a12f13..a2fae12 100644 --- 
a/syncbot/handlers/__init__.py +++ b/syncbot/handlers/__init__.py @@ -62,6 +62,8 @@ from handlers.sync import ( check_join_sync_channel, handle_app_home_opened, + handle_db_reset, + handle_db_reset_confirm, handle_join_sync_submission, handle_member_joined_channel, handle_new_sync_submission, @@ -95,6 +97,8 @@ "handle_data_migration_confirm_submit", "handle_data_migration_export", "handle_data_migration_submit", + "handle_db_reset", + "handle_db_reset_confirm", "handle_accept_group_invite", "handle_create_group", "handle_create_group_submit", diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 2e47d51..d31a776 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -41,11 +41,11 @@ def _get_publishable_channel_options(client: WebClient, workspace_id: int) -> li cursor=cursor or None, ) chs = helpers.safe_get(resp, "channels") or [] - for ch in chs: - cid = ch.get("id") + for slack_channel in chs: + cid = slack_channel.get("id") if not cid or cid in synced_ids: continue - name = ch.get("name") or cid + name = slack_channel.get("name") or cid label = f"#{name}" if len(label) > 75: label = label[:72] + "..." 
@@ -340,7 +340,7 @@ def handle_publish_channel_submit( except Exception as e: _logger.error(f"Failed to publish channel {channel_id}: {e}") - # Refresh Home for all admins in current workspace, then partner workspaces + # Refresh Home for all admins in current workspace, then other group members builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context) @@ -391,20 +391,20 @@ def handle_unpublish_channel( [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)], ) - for ch in all_channels: + for sync_channel in all_channels: try: - ws = helpers.get_workspace_by_id(ch.workspace_id) - if ws and ws.bot_token: - name = admin_name if workspace_record and ch.workspace_id == workspace_record.id else admin_label - ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id) + if member_ws and member_ws.bot_token: + name = admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_synced_channels( - ws_client, - [ch.channel_id], + member_client, + [sync_channel.channel_id], f":octagonal_sign: *{name}* unpublished this channel. 
Syncing is no longer available.", ) - ws_client.conversations_leave(channel=ch.channel_id) + member_client.conversations_leave(channel=sync_channel.channel_id) except Exception as e: - _logger.warning(f"Failed to notify/leave channel {ch.channel_id}: {e}") + _logger.warning(f"Failed to notify/leave channel {sync_channel.channel_id}: {e}") DbManager.delete_records(schemas.Sync, [schemas.Sync.id == sync_id]) @@ -454,32 +454,32 @@ def _toggle_sync_status( [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)], ) - for ch in all_channels: + for sync_channel in all_channels: DbManager.update_records( schemas.SyncChannel, - [schemas.SyncChannel.id == ch.id], + [schemas.SyncChannel.id == sync_channel.id], {schemas.SyncChannel.status: target_status}, ) ws_cache: dict[int, schemas.Workspace | None] = {} - for ch in all_channels: + for sync_channel in all_channels: try: - ws = ws_cache.get(ch.workspace_id) or helpers.get_workspace_by_id(ch.workspace_id) - ws_cache[ch.workspace_id] = ws - if ws and ws.bot_token: - name = admin_name if workspace_record and ch.workspace_id == workspace_record.id else admin_label - partner_chs = [c for c in all_channels if c.workspace_id != ch.workspace_id] - if partner_chs: - p_ws = ws_cache.get(partner_chs[0].workspace_id) or helpers.get_workspace_by_id(partner_chs[0].workspace_id) - ws_cache[partner_chs[0].workspace_id] = p_ws - partner_ref = helpers.resolve_channel_name(partner_chs[0].channel_id, p_ws) - msg = f":{emoji}: *{name}* {verb} syncing with *{partner_ref}*." 
+ channel_ws = ws_cache.get(sync_channel.workspace_id) or helpers.get_workspace_by_id(sync_channel.workspace_id) + ws_cache[sync_channel.workspace_id] = channel_ws + if channel_ws and channel_ws.bot_token: + name = admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + other_channels = [c for c in all_channels if c.workspace_id != sync_channel.workspace_id] + if other_channels: + other_ws = ws_cache.get(other_channels[0].workspace_id) or helpers.get_workspace_by_id(other_channels[0].workspace_id) + ws_cache[other_channels[0].workspace_id] = other_ws + channel_ref = helpers.resolve_channel_name(other_channels[0].channel_id, other_ws) + msg = f":{emoji}: *{name}* {verb} syncing with *{channel_ref}*." else: msg = f":{emoji}: *{name}* {verb} channel syncing." - ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) - helpers.notify_synced_channels(ws_client, [ch.channel_id], msg) + ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) + helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg) except Exception as e: - _logger.warning(f"Failed to notify channel {ch.channel_id} about {verb}: {e}") + _logger.warning(f"Failed to notify channel {sync_channel.channel_id} about {verb}: {e}") _logger.info(log_event, extra={"sync_id": sync_id, "channels": len(all_channels)}) @@ -594,15 +594,15 @@ def handle_stop_sync_confirm( my_channel = next((c for c in all_channels if c.workspace_id == workspace_record.id), None) other_channels = [c for c in all_channels if c.workspace_id != workspace_record.id] - for ch in all_channels: + for sync_channel in all_channels: try: - ws = helpers.get_workspace_by_id(ch.workspace_id) - if ws and ws.bot_token: - if ch.workspace_id == workspace_record.id and other_channels: - p_ws = helpers.get_workspace_by_id(other_channels[0].workspace_id) - partner_ref = helpers.resolve_channel_name(other_channels[0].channel_id, p_ws) - msg = f":octagonal_sign: 
*{admin_name}* stopped syncing with *{partner_ref}*." - elif ch.workspace_id != workspace_record.id: + channel_ws = helpers.get_workspace_by_id(sync_channel.workspace_id) + if channel_ws and channel_ws.bot_token: + if sync_channel.workspace_id == workspace_record.id and other_channels: + other_ws = helpers.get_workspace_by_id(other_channels[0].workspace_id) + channel_ref = helpers.resolve_channel_name(other_channels[0].channel_id, other_ws) + msg = f":octagonal_sign: *{admin_name}* stopped syncing with *{channel_ref}*." + elif sync_channel.workspace_id != workspace_record.id: my_ref = ( helpers.resolve_channel_name(my_channel.channel_id, workspace_record) if my_channel @@ -611,10 +611,10 @@ def handle_stop_sync_confirm( msg = f":octagonal_sign: *{admin_label}* stopped syncing with *{my_ref}*." else: msg = f":octagonal_sign: *{admin_name}* stopped channel syncing." - ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) - helpers.notify_synced_channels(ws_client, [ch.channel_id], msg) + ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) + helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg) except Exception as e: - _logger.warning(f"Failed to notify channel {ch.channel_id}: {e}") + _logger.warning(f"Failed to notify channel {sync_channel.channel_id}: {e}") if my_channel: DbManager.delete_records(schemas.PostMeta, [schemas.PostMeta.sync_channel_id == my_channel.id]) @@ -785,12 +785,12 @@ def handle_subscribe_channel_submit( if publisher_channels: pub_ch = publisher_channels[0] pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id) - partner_ref = helpers.resolve_channel_name(pub_ch.channel_id, pub_ws) + channel_ref = helpers.resolve_channel_name(pub_ch.channel_id, pub_ws) else: - partner_ref = sync_record.title or "the partner channel" + channel_ref = sync_record.title or "the other channel" client.chat_postMessage( channel=channel_id, - text=f":arrows_counterclockwise: *{admin_name}* started syncing 
this channel with *{partner_ref}*. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{channel_ref}*. Messages will be shared automatically.", ) except Exception as exc: _logger.debug(f"subscribe_channel: failed to notify subscriber channel {channel_id}: {exc}") @@ -833,15 +833,15 @@ def _refresh_group_member_homes( ) -> None: """Refresh the Home tab for all group members except the acting workspace. - Uses context=None when refreshing partners so admin lookups are always + Uses context=None when refreshing other members so admin lookups are always fresh for each workspace (avoids request-scoped cache from the acting ws). """ members = _get_group_members(group_id) refreshed: set[int] = set() - for m in members: - if not m.workspace_id or m.workspace_id == exclude_workspace_id or m.workspace_id in refreshed: + for member in members: + if not member.workspace_id or member.workspace_id == exclude_workspace_id or member.workspace_id in refreshed: continue - ws = helpers.get_workspace_by_id(m.workspace_id, context=context) - if ws: - builders.refresh_home_tab_for_workspace(ws, logger, context=None) - refreshed.add(m.workspace_id) + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + refreshed.add(member.workspace_id) diff --git a/syncbot/handlers/group_manage.py b/syncbot/handlers/group_manage.py index 3e8aabd..1f279f4 100644 --- a/syncbot/handlers/group_manage.py +++ b/syncbot/handlers/group_manage.py @@ -163,10 +163,10 @@ def handle_leave_group_confirm( from datetime import UTC, datetime now = datetime.now(UTC) - for m in members: + for member in members: DbManager.update_records( schemas.WorkspaceGroupMember, - [schemas.WorkspaceGroupMember.id == m.id], + [schemas.WorkspaceGroupMember.id == member.id], { schemas.WorkspaceGroupMember.status: "inactive", 
schemas.WorkspaceGroupMember.deleted_at: now, @@ -191,20 +191,20 @@ def handle_leave_group_confirm( DbManager.delete_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) _logger.info("group_deleted_empty", extra={"group_id": group_id}) else: - for m in remaining_members: - if not m.workspace_id: + for member in remaining_members: + if not member.workspace_id: continue - partner = helpers.get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue try: - partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( - partner_client, + member_client, f":wave: *{admin_label}* left the group *{group.name}*.", ) - builders.refresh_home_tab_for_workspace(partner, logger, context=context) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: - _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + _logger.warning(f"Failed to notify group member {member.workspace_id}: {e}") builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index 5e2683e..4a79f15 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -47,25 +47,25 @@ def _activate_group_membership( ], ) - partner_clients: list[tuple[WebClient, int]] = [] + member_clients: list[tuple[WebClient, int]] = [] for member in members: if not member.workspace_id: continue - partner = helpers.get_workspace_by_id(member.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: 
continue try: - partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) - helpers._refresh_user_directory(partner_client, partner.id) - partner_clients.append((partner_client, partner.id)) + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers._refresh_user_directory(member_client, member_ws.id) + member_clients.append((member_client, member_ws.id)) except Exception as e: - _logger.warning(f"Failed to refresh user directory for workspace {partner.id}: {e}") + _logger.warning(f"Failed to refresh user directory for workspace {member_ws.id}: {e}") try: - helpers.seed_user_mappings(workspace_record.id, partner.id, group_id=group.id) - helpers.seed_user_mappings(partner.id, workspace_record.id, group_id=group.id) + helpers.seed_user_mappings(workspace_record.id, member_ws.id, group_id=group.id) + helpers.seed_user_mappings(member_ws.id, workspace_record.id, group_id=group.id) except Exception as e: _logger.warning(f"Failed to seed user mappings: {e}") @@ -74,11 +74,11 @@ def _activate_group_membership( except Exception as e: _logger.warning(f"Auto-match failed for workspace {workspace_record.id}: {e}") - for p_client, p_id in partner_clients: + for member_client, member_ws_id in member_clients: try: - helpers.run_auto_match_for_workspace(p_client, p_id) + helpers.run_auto_match_for_workspace(member_client, member_ws_id) except Exception as e: - _logger.warning(f"Auto-match failed for partner workspace {p_id}: {e}") + _logger.warning(f"Auto-match failed for member workspace {member_ws_id}: {e}") def handle_create_group( @@ -329,16 +329,16 @@ def handle_join_group_submit( for m in other_members: if not m.workspace_id: continue - partner = helpers.get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = helpers.get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue try: - partner_client = 
WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( - partner_client, + member_client, f":handshake: *{admin_label}* joined the group *{group.name}*.", ) - builders.refresh_home_tab_for_workspace(partner, logger, context=context) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") @@ -572,7 +572,7 @@ def handle_invite_workspace_submit( }, ) - builders.refresh_home_tab_for_workspace(target_ws, logger, context=context) + builders.refresh_home_tab_for_workspace(target_ws, logger, context=None) builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) @@ -639,16 +639,16 @@ def handle_accept_group_invite( for m in other_members: if not m.workspace_id: continue - partner = helpers.get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = helpers.get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue try: - partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( - partner_client, + member_client, f":handshake: *{ws_name}* has joined the group *{group.name}*.", ) - builders.refresh_home_tab_for_workspace(partner, logger, context=context) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") @@ -717,11 +717,11 @@ def handle_decline_group_invite( for m in all_members: if not m.workspace_id: continue - partner = helpers.get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = 
helpers.get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue with contextlib.suppress(Exception): - builders.refresh_home_tab_for_workspace(partner, logger, context=context) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) def _update_invite_dms( diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 5c11909..708c732 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -63,7 +63,7 @@ def handle_remove_sync( logger.warning(f"Failed to leave channel {sync_channel_record.channel_id}: {e}") builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) - partner_chs = DbManager.find_records( + other_sync_channels = DbManager.find_records( schemas.SyncChannel, [ schemas.SyncChannel.sync_id == sync_channel_record.sync_id, @@ -71,10 +71,10 @@ def handle_remove_sync( schemas.SyncChannel.workspace_id != workspace_record.id, ], ) - for p_ch in partner_chs: - p_ws = helpers.get_workspace_by_id(p_ch.workspace_id, context=context) - if p_ws: - builders.refresh_home_tab_for_workspace(p_ws, logger, context=context) + for sync_channel in other_sync_channels: + member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) def handle_app_home_opened( @@ -206,7 +206,7 @@ def handle_join_sync_submission( acting_user_id = helpers.safe_get(body, "user", "id") or user_id admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) - partner_channels: list = [] + other_sync_channels: list = [] try: client.conversations_join(channel=channel_id) channel_sync_record = schemas.SyncChannel( @@ -216,7 +216,7 @@ def handle_join_sync_submission( created_at=datetime.now(UTC), ) DbManager.create_record(channel_sync_record) - partner_channels = DbManager.find_records( + other_sync_channels = 
DbManager.find_records( schemas.SyncChannel, [ schemas.SyncChannel.sync_id == sync_id, @@ -224,38 +224,38 @@ def handle_join_sync_submission( schemas.SyncChannel.workspace_id != workspace_record.id, ], ) - if partner_channels: - p_ch = partner_channels[0] - p_ws = helpers.get_workspace_by_id(p_ch.workspace_id) - partner_ref = helpers.resolve_channel_name(p_ch.channel_id, p_ws) + if other_sync_channels: + first_channel = other_sync_channels[0] + first_ws = helpers.get_workspace_by_id(first_channel.workspace_id) + channel_ref = helpers.resolve_channel_name(first_channel.channel_id, first_ws) else: - partner_ref = sync_record.title or "the partner channel" + channel_ref = sync_record.title or "the other channel" client.chat_postMessage( channel=channel_id, - text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{partner_ref}*. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{channel_ref}*. Messages will be shared automatically.", ) local_ref = helpers.resolve_channel_name(channel_id, workspace_record) - for p_ch in partner_channels: + for sync_channel in other_sync_channels: try: - p_ws = helpers.get_workspace_by_id(p_ch.workspace_id) - if p_ws and p_ws.bot_token: - p_client = WebClient(token=helpers.decrypt_bot_token(p_ws.bot_token)) - p_client.chat_postMessage( - channel=p_ch.channel_id, + member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id) + if member_ws and member_ws.bot_token: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + member_client.chat_postMessage( + channel=sync_channel.channel_id, text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this channel. 
Messages will be shared automatically.", ) except Exception as exc: - _logger.debug(f"join_sync: failed to notify publisher channel {p_ch.channel_id}: {exc}") + _logger.debug(f"join_sync: failed to notify channel {sync_channel.channel_id}: {exc}") except Exception as e: logger.error(f"Failed to join sync channel {channel_id}: {e}") builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) - for p_ch in partner_channels: - p_ws = helpers.get_workspace_by_id(p_ch.workspace_id, context=context) - if p_ws: - builders.refresh_home_tab_for_workspace(p_ws, logger, context=context) + for sync_channel in other_sync_channels: + member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) def handle_new_sync_submission( @@ -399,3 +399,99 @@ def check_join_sync_channel( title_text="Join Sync", callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, ) + + +# --------------------------------------------------------------------------- +# Database Reset (gated by ENABLE_DB_RESET) +# --------------------------------------------------------------------------- + +def handle_db_reset( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a confirmation modal warning the user before a full DB reset.""" + if not constants.ENABLE_DB_RESET: + return + + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + modal_blocks = [ + orm.SectionBlock( + label=( + ":rotating_light: *This will permanently delete ALL data* :rotating_light:\n\n" + "Every workspace, group, channel sync, user mapping, and federation connection " + "in this database will be erased and the schema will be reinitialized from `init.sql`.\n\n" + "*This action cannot be 
undone.*" + ), + ).as_form_field(), + ] + + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_DB_RESET_CONFIRM, + "title": {"type": "plain_text", "text": "Reset Database?"}, + "submit": {"type": "plain_text", "text": "Yes, reset everything"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": modal_blocks, + }, + ) + + +def handle_db_reset_confirm( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Execute the database reset after user confirmed via modal.""" + if not constants.ENABLE_DB_RESET: + return + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + _logger.critical( + "DB_RESET triggered by user %s — dropping database and reinitializing from init.sql", + user_id, + ) + + from db import drop_and_init_db + drop_and_init_db() + + helpers.clear_all_caches() + + team_id = ( + helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "team_id") + ) + if team_id and user_id: + try: + client.views_publish( + user_id=user_id, + view={ + "type": "home", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":white_check_mark: *Database has been reset.*\nPlease reinstall the app or re-open this tab to get started fresh.", + }, + } + ], + }, + ) + except Exception as e: + _logger.warning("Failed to publish post-reset Home tab: %s", e) diff --git a/syncbot/handlers/tokens.py b/syncbot/handlers/tokens.py index 08e01b5..397b67d 100644 --- a/syncbot/handlers/tokens.py +++ b/syncbot/handlers/tokens.py @@ -22,7 +22,7 @@ def handle_tokens_revoked( """Handle ``tokens_revoked`` event: a workspace uninstalled the app. Soft-deletes the workspace, its group memberships, and its sync channels. - Notifies partner workspaces in shared groups via admin DMs and channel messages. 
+ Notifies other group member workspaces via admin DMs and channel messages. """ team_id = helpers.safe_get(body, "team_id") if not team_id: @@ -67,10 +67,10 @@ def handle_tokens_revoked( schemas.SyncChannel.deleted_at.is_(None), ], ) - for ch in my_channels: + for sync_channel in my_channels: DbManager.update_records( schemas.SyncChannel, - [schemas.SyncChannel.id == ch.id], + [schemas.SyncChannel.id == sync_channel.id], {schemas.SyncChannel.deleted_at: now, schemas.SyncChannel.status: "paused"}, ) @@ -88,42 +88,42 @@ def handle_tokens_revoked( for m in group_members: if not m.workspace_id or m.workspace_id in notified_ws: continue - partner = helpers.get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at: + member_ws = helpers.get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue notified_ws.add(m.workspace_id) try: - partner_client = WebClient(token=helpers.decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( - partner_client, + member_client, f":double_vertical_bar: *{ws_name}* has uninstalled SyncBot. " f"Syncing has been paused. 
If they reinstall within {retention_days} days, " "syncing will resume automatically.", ) - partner_channel_ids = [] - for ch in my_channels: + member_channel_ids = [] + for sync_channel in my_channels: sibling_channels = DbManager.find_records( schemas.SyncChannel, [ - schemas.SyncChannel.sync_id == ch.sync_id, + schemas.SyncChannel.sync_id == sync_channel.sync_id, schemas.SyncChannel.workspace_id == m.workspace_id, schemas.SyncChannel.deleted_at.is_(None), ], ) - for sc in sibling_channels: - partner_channel_ids.append(sc.channel_id) + for sibling in sibling_channels: + member_channel_ids.append(sibling.channel_id) - if partner_channel_ids: + if member_channel_ids: helpers.notify_synced_channels( - partner_client, - partner_channel_ids, + member_client, + member_channel_ids, f":double_vertical_bar: Syncing with *{ws_name}* has been paused because they uninstalled the app.", ) except Exception as e: - _logger.warning(f"handle_tokens_revoked: failed to notify partner {m.workspace_id}: {e}") + _logger.warning(f"handle_tokens_revoked: failed to notify member {m.workspace_id}: {e}") _logger.info( "workspace_soft_deleted", diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py index 5b31f5c..51c56d0 100644 --- a/syncbot/handlers/users.py +++ b/syncbot/handlers/users.py @@ -89,9 +89,9 @@ def handle_user_profile_changed( members = _get_group_members(group.id) for m in members: if m.workspace_id and m.workspace_id != workspace_record.id and m.workspace_id not in notified_ws: - partner = helpers.get_workspace_by_id(m.workspace_id, context=context) - if partner: - builders.refresh_home_tab_for_workspace(partner, logger, context=context) + member_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) notified_ws.add(m.workspace_id) _logger.info( @@ -175,27 +175,27 @@ def handle_user_mapping_refresh( for group, _ in _get_groups_for_workspace(workspace_record.id): 
members.extend(_get_group_members(group.id)) - partner_clients: list[tuple[WebClient, int]] = [] + member_clients: list[tuple[WebClient, int]] = [] for m in members: if not m.workspace_id or m.workspace_id == workspace_record.id: continue try: helpers._CACHE.pop(f"dir_refresh:{m.workspace_id}", None) - partner_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) - if partner_ws and partner_ws.bot_token: - partner_client = WebClient(token=helpers.decrypt_bot_token(partner_ws.bot_token)) - helpers._refresh_user_directory(partner_client, m.workspace_id) - partner_clients.append((partner_client, m.workspace_id)) + member_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + if member_ws and member_ws.bot_token: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers._refresh_user_directory(member_client, m.workspace_id) + member_clients.append((member_client, m.workspace_id)) helpers.seed_user_mappings(m.workspace_id, workspace_record.id, group_id=gid_opt) helpers.seed_user_mappings(workspace_record.id, m.workspace_id, group_id=gid_opt) except Exception: pass helpers.run_auto_match_for_workspace(client, workspace_record.id) - for p_client, p_id in partner_clients: + for member_client, member_ws_id in member_clients: try: - helpers.run_auto_match_for_workspace(p_client, p_id) + helpers.run_auto_match_for_workspace(member_client, member_ws_id) except Exception: pass diff --git a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py index a77ab69..4d3d9cb 100644 --- a/syncbot/helpers/__init__.py +++ b/syncbot/helpers/__init__.py @@ -12,6 +12,7 @@ _cache_delete_prefix, _cache_get, _cache_set, + clear_all_caches, ) from helpers.core import ( format_admin_label, @@ -90,6 +91,7 @@ "_cache_delete_prefix", "_cache_get", "_cache_set", + "clear_all_caches", "_get_user_profile", "_normalize_name", "_refresh_user_directory", diff --git a/syncbot/helpers/_cache.py b/syncbot/helpers/_cache.py index f20d665..99c781b 
100644 --- a/syncbot/helpers/_cache.py +++ b/syncbot/helpers/_cache.py @@ -36,3 +36,10 @@ def _cache_delete_prefix(prefix: str) -> int: for k in to_remove: _CACHE.pop(k, None) return len(to_remove) + + +def clear_all_caches() -> int: + """Remove every entry from the in-process cache. Returns count removed.""" + count = len(_CACHE) + _CACHE.clear() + return count diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py index 1be5818..6ecffe9 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -233,10 +233,10 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr ], ) groups_data = [] - for m in memberships: - g = DbManager.get_record(schemas.WorkspaceGroup, m.group_id) + for membership in memberships: + g = DbManager.get_record(schemas.WorkspaceGroup, membership.group_id) if g: - groups_data.append({"name": g.name, "role": m.role}) + groups_data.append({"name": g.name, "role": membership.role}) # Syncs that have at least one SyncChannel for W sync_channels_w = DbManager.find_records( @@ -246,7 +246,7 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr schemas.SyncChannel.deleted_at.is_(None), ], ) - sync_ids = {sc.sync_id for sc in sync_channels_w} + sync_ids = {sync_channel.sync_id for sync_channel in sync_channels_w} syncs_data = [] sync_channels_data = [] post_meta_by_key = {} @@ -258,9 +258,9 @@ def build_migration_export(workspace_id: int, include_source_instance: bool = Tr pub_team = None tgt_team = None if sync.publisher_workspace_id: - pw = DbManager.get_record(schemas.Workspace, sync.publisher_workspace_id) - if pw: - pub_team = pw.team_id + publisher_ws = DbManager.get_record(schemas.Workspace, sync.publisher_workspace_id) + if publisher_ws: + pub_team = publisher_ws.team_id if sync.target_workspace_id: tw = DbManager.get_record(schemas.Workspace, sync.target_workspace_id) if tw: @@ -272,20 +272,20 @@ def 
build_migration_export(workspace_id: int, include_source_instance: bool = Tr "target_team_id": tgt_team, "is_publisher": sync.publisher_workspace_id == workspace_id, }) - for sc in sync_channels_w: - if sc.sync_id != sync_id: + for sync_channel in sync_channels_w: + if sync_channel.sync_id != sync_id: continue sync_channels_data.append({ "sync_title": sync.title, - "channel_id": sc.channel_id, - "status": sc.status or "active", + "channel_id": sync_channel.channel_id, + "status": sync_channel.status or "active", }) - key = f"{sync.title}:{sc.channel_id}" + key = f"{sync.title}:{sync_channel.channel_id}" post_metas = DbManager.find_records( schemas.PostMeta, - [schemas.PostMeta.sync_channel_id == sc.id], + [schemas.PostMeta.sync_channel_id == sync_channel.id], ) - post_meta_by_key[key] = [{"post_id": pm.post_id, "ts": float(pm.ts)} for pm in post_metas] + post_meta_by_key[key] = [{"post_id": post_meta.post_id, "ts": float(post_meta.ts)} for post_meta in post_metas] # user_directory for W ud_records = DbManager.find_records( @@ -416,14 +416,14 @@ def import_migration_data( ], ) now = datetime.now(UTC) - for sc in channels_to_remove: + for sync_channel in channels_to_remove: DbManager.delete_records( schemas.PostMeta, - [schemas.PostMeta.sync_channel_id == sc.id], + [schemas.PostMeta.sync_channel_id == sync_channel.id], ) DbManager.update_records( schemas.SyncChannel, - [schemas.SyncChannel.id == sc.id], + [schemas.SyncChannel.id == sync_channel.id], {schemas.SyncChannel.deleted_at: now}, ) @@ -463,20 +463,20 @@ def import_migration_data( sync_id = title_to_sync.get(sync_title) if not sync_id: continue - new_sc = schemas.SyncChannel( + new_sync_channel = schemas.SyncChannel( sync_id=sync_id, workspace_id=workspace_id, channel_id=channel_id, status=status, created_at=datetime.now(UTC), ) - DbManager.create_record(new_sc) + DbManager.create_record(new_sync_channel) key = f"{sync_title}:{channel_id}" - for pm in post_meta_export.get(key, []): + for post_meta in 
post_meta_export.get(key, []): DbManager.create_record(schemas.PostMeta( - post_id=pm["post_id"], - sync_channel_id=new_sc.id, - ts=Decimal(str(pm["ts"])), + post_id=post_meta["post_id"], + sync_channel_id=new_sync_channel.id, + ts=Decimal(str(post_meta["ts"])), )) # user_directory for W (replace: remove existing for this workspace then insert) diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py index 11243b2..f8bf5b0 100644 --- a/syncbot/helpers/notifications.py +++ b/syncbot/helpers/notifications.py @@ -200,22 +200,22 @@ def purge_stale_soft_deletes() -> int: schemas.WorkspaceGroupMember.deleted_at.is_(None), ], ) - for m in other_members: - if not m.workspace_id or m.workspace_id in notified_ws: + for member in other_members: + if not member.workspace_id or member.workspace_id in notified_ws: continue - partner = get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at is not None: + member_ws = get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at is not None: continue - notified_ws.add(m.workspace_id) + notified_ws.add(member.workspace_id) try: - partner_client = WebClient(token=decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=decrypt_bot_token(member_ws.bot_token)) notify_admins_dm( - partner_client, + member_client, f":wastebasket: *{ws_name}* has been permanently removed " f"after {retention_days} days of inactivity.", ) except Exception as e: - _logger.warning(f"purge: failed to notify partner {m.workspace_id}: {e}") + _logger.warning(f"purge: failed to notify member {member.workspace_id}: {e}") DbManager.delete_records(schemas.Workspace, [schemas.Workspace.id == ws.id]) purged += 1 diff --git a/syncbot/helpers/workspace.py b/syncbot/helpers/workspace.py index d92b106..79c799b 100644 --- a/syncbot/helpers/workspace.py +++ b/syncbot/helpers/workspace.py @@ -217,12 +217,12 @@ def _restore_workspace( 
schemas.SyncChannel.deleted_at.isnot(None), ], ) - for ch in my_soft_channels: - sync = DbManager.get_record(schemas.Sync, id=ch.sync_id) + for sync_channel in my_soft_channels: + sync = DbManager.get_record(schemas.Sync, id=sync_channel.sync_id) if sync and sync.group_id in restored_group_ids: DbManager.update_records( schemas.SyncChannel, - [schemas.SyncChannel.id == ch.id], + [schemas.SyncChannel.id == sync_channel.id], {schemas.SyncChannel.deleted_at: None, schemas.SyncChannel.status: "active"}, ) @@ -240,23 +240,23 @@ def _restore_workspace( for m in members: if not m.workspace_id or m.workspace_id in notified_ws: continue - partner = get_workspace_by_id(m.workspace_id) - if not partner or not partner.bot_token or partner.deleted_at is not None: + member_ws = get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at is not None: continue notified_ws.add(m.workspace_id) try: - partner_client = WebClient(token=decrypt_bot_token(partner.bot_token)) + member_client = WebClient(token=decrypt_bot_token(member_ws.bot_token)) notify_admins_dm( - partner_client, + member_client, f":arrow_forward: *{ws_name}* has been restored. 
Group syncing will resume.", ) syncs_in_group = DbManager.find_records( schemas.Sync, [schemas.Sync.group_id == group_id], ) - partner_ch_ids = [] + other_channel_ids = [] for sync in syncs_in_group: - partner_channels = DbManager.find_records( + other_sync_channels = DbManager.find_records( schemas.SyncChannel, [ schemas.SyncChannel.sync_id == sync.id, @@ -264,16 +264,16 @@ def _restore_workspace( schemas.SyncChannel.deleted_at.is_(None), ], ) - for sc in partner_channels: - partner_ch_ids.append(sc.channel_id) - if partner_ch_ids: + for sync_channel in other_sync_channels: + other_channel_ids.append(sync_channel.channel_id) + if other_channel_ids: notify_synced_channels( - partner_client, - partner_ch_ids, + member_client, + other_channel_ids, f":arrow_forward: Syncing with *{ws_name}* has been resumed.", ) except Exception as e: - _logger.warning(f"_restore_workspace: failed to notify partner {m.workspace_id}: {e}") + _logger.warning(f"_restore_workspace: failed to notify member {m.workspace_id}: {e}") _logger.info( "workspace_restored", diff --git a/syncbot/routing.py b/syncbot/routing.py index c320b93..439b614 100644 --- a/syncbot/routing.py +++ b/syncbot/routing.py @@ -39,6 +39,7 @@ actions.CONFIG_BACKUP_DOWNLOAD: handlers.handle_backup_download, actions.CONFIG_DATA_MIGRATION: handlers.handle_data_migration, actions.CONFIG_DATA_MIGRATION_EXPORT: handlers.handle_data_migration_export, + actions.CONFIG_DB_RESET: handlers.handle_db_reset, actions.CONFIG_GENERATE_FEDERATION_CODE: handlers.handle_generate_federation_code, actions.CONFIG_ENTER_FEDERATION_CODE: handlers.handle_enter_federation_code, actions.CONFIG_REMOVE_FEDERATION_CONNECTION: handlers.handle_remove_federation_connection, @@ -75,6 +76,7 @@ actions.CONFIG_BACKUP_RESTORE_CONFIRM: handlers.handle_backup_restore_confirm_submit, actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit, actions.CONFIG_DATA_MIGRATION_CONFIRM: handlers.handle_data_migration_confirm_submit, + 
actions.CONFIG_DB_RESET_CONFIRM: handlers.handle_db_reset_confirm, } """View submission ``callback_id`` -> handler.""" diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py index 286ce47..1a18c52 100644 --- a/syncbot/slack/actions.py +++ b/syncbot/slack/actions.py @@ -204,3 +204,13 @@ CONFIG_REMOVE_FEDERATION_CONNECTION = "remove_federation_connection" """Action: user clicked "Remove Connection" on an external connection (prefix-matched).""" + +# --------------------------------------------------------------------------- +# Database Reset (dev/admin tool, gated by ENABLE_DB_RESET env var) +# --------------------------------------------------------------------------- + +CONFIG_DB_RESET = "db_reset" +"""Action: user clicked "Reset Database" on the Home tab.""" + +CONFIG_DB_RESET_CONFIRM = "db_reset_confirm" +"""Callback: user confirmed database reset in the warning modal.""" From 0709e0f274e3044c70cd68af1e9935fe8681c269 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 12 Mar 2026 09:49:35 -0500 Subject: [PATCH 03/45] Added log output level to env. --- .env.example | 6 ++++++ syncbot/logger.py | 11 ++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.env.example b/.env.example index 9200e78..e497051 100644 --- a/.env.example +++ b/.env.example @@ -68,6 +68,12 @@ ADMIN_DATABASE_SCHEMA=syncbot # storage costs. Set S3_VIDEO_ENABLED=true to store videos in S3 as well. # S3_VIDEO_ENABLED=false +# ----------------------------------------------------------------------------- +# Logging (optional) +# ----------------------------------------------------------------------------- +# Log output level: DEBUG, INFO, WARNING, ERROR, or CRITICAL (default: INFO). 
+# LOG_LEVEL=INFO + # ----------------------------------------------------------------------------- # Soft-Delete Retention (optional) # ----------------------------------------------------------------------------- diff --git a/syncbot/logger.py b/syncbot/logger.py index 97a1f5b..7cd116e 100644 --- a/syncbot/logger.py +++ b/syncbot/logger.py @@ -183,6 +183,10 @@ def format(self, record: logging.LogRecord) -> str: def configure_logging(level: int = logging.INFO) -> None: """Replace the root logger's handlers with a single structured-JSON handler. + The effective level is determined by the ``LOG_LEVEL`` environment variable + (e.g. ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``). If the + variable is unset or invalid the *level* parameter is used as a fallback. + Uses :class:`DevFormatter` (human-readable, colorized) when ``LOCAL_DEVELOPMENT`` is enabled, otherwise :class:`StructuredFormatter` (single-line JSON for CloudWatch). @@ -196,8 +200,13 @@ def configure_logging(level: int = logging.INFO) -> None: return _configured = True + env_level = os.environ.get("LOG_LEVEL", "").strip().upper() + effective_level = getattr(logging, env_level, None) if env_level else None + if not isinstance(effective_level, int): + effective_level = level + root = logging.getLogger() - root.setLevel(level) + root.setLevel(effective_level) # Remove any existing handlers (e.g. Slack Bolt's defaults). for h in list(root.handlers): From b72358f689740839fcea9494b910ebc862c6760e Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 12 Mar 2026 11:08:19 -0500 Subject: [PATCH 04/45] Debugging and code cleanup. 
--- syncbot/app.py | 21 +- syncbot/builders/channel_sync.py | 6 +- syncbot/builders/home.py | 59 ++--- syncbot/db/__init__.py | 23 +- syncbot/handlers/__init__.py | 12 +- syncbot/handlers/channel_sync.py | 7 +- syncbot/handlers/export_import.py | 386 +++++++++++++++++++++--------- syncbot/handlers/messages.py | 26 +- syncbot/handlers/sync.py | 53 ++-- syncbot/helpers/export_import.py | 2 +- syncbot/routing.py | 6 +- syncbot/slack/actions.py | 9 + 12 files changed, 413 insertions(+), 197 deletions(-) diff --git a/syncbot/app.py b/syncbot/app.py index 8f5c817..05df11f 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -40,9 +40,19 @@ set_correlation_id, ) from routing import MAIN_MAPPER -from slack.actions import CONFIG_PUBLISH_CHANNEL_SUBMIT, CONFIG_PUBLISH_MODE_SUBMIT +from slack.actions import ( + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_PUBLISH_MODE_SUBMIT, +) -_DEFERRED_ACK_VIEWS = frozenset({CONFIG_PUBLISH_MODE_SUBMIT, CONFIG_PUBLISH_CHANNEL_SUBMIT}) +_DEFERRED_ACK_VIEWS = frozenset({ + CONFIG_PUBLISH_MODE_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, +}) """view_submission callback_ids whose handlers control their own ack response.""" _SENSITIVE_KEYS = frozenset({ @@ -153,9 +163,12 @@ def _tracked_ack(*args, **kwargs): run_function = MAIN_MAPPER.get(request_type, {}).get(request_id) if run_function: try: - run_function(body, client, logger, context) + result = run_function(body, client, logger, context) if defer_ack and not ack_called: - ack() + if isinstance(result, dict): + ack(**result) + else: + ack() emit_metric( "request_handled", duration_ms=round(get_request_duration_ms(), 1), diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py index c545c7f..c30ca5a 100644 --- a/syncbot/builders/channel_sync.py +++ b/syncbot/builders/channel_sync.py @@ -14,7 +14,7 @@ from db.schemas import PostMeta, Sync, SyncChannel, 
Workspace, WorkspaceGroup, WorkspaceGroupMember from helpers import safe_get from slack import actions, orm -from slack.blocks import context, section +from slack.blocks import context as block_context, section _logger = logging.getLogger(__name__) @@ -70,7 +70,7 @@ def _build_inline_channel_sync( if not published_syncs and not waiting_syncs and not available_syncs: return - blocks.append(context("*Synced Channels*")) + blocks.append(block_context("*Synced Channels*")) for sync, my_ch, other_chs, is_paused in published_syncs: my_ref = _format_channel_ref(my_ch.channel_id, workspace_record, is_local=True) @@ -118,7 +118,7 @@ def _build_inline_channel_sync( context_parts.append(f"{msg_count} message{'s' if msg_count != 1 else ''} tracked") if context_parts: - blocks.append(context(" · ".join(context_parts))) + blocks.append(block_context(" · ".join(context_parts))) blocks.append( orm.ActionsBlock( elements=[ diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py index a393501..ff1712c 100644 --- a/syncbot/builders/home.py +++ b/syncbot/builders/home.py @@ -162,23 +162,6 @@ def build_home_tab( blocks: list[orm.BaseBlock] = [] - blocks.append(header("SyncBot Configuration")) - blocks.append( - orm.ActionsBlock( - elements=[ - orm.ButtonElement( - label=":arrows_counterclockwise: Refresh", - action=actions.CONFIG_REFRESH_HOME, - ), - orm.ButtonElement( - label=":floppy_disk: Backup/Restore", - action=actions.CONFIG_BACKUP_RESTORE, - ), - ] - ) - ) - blocks.append(divider()) - if not is_admin: blocks.append(block_context(":lock: Only workspace admins and owners can configure SyncBot.")) block_dicts = orm.BlockView(blocks=blocks).as_form_field() @@ -187,6 +170,28 @@ def build_home_tab( client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) return None + blocks.append(header("SyncBot Configuration")) + top_buttons = [ + orm.ButtonElement( + label=":arrows_counterclockwise: Refresh", + action=actions.CONFIG_REFRESH_HOME, + ), + 
orm.ButtonElement( + label=":floppy_disk: Backup/Restore", + action=actions.CONFIG_BACKUP_RESTORE, + ), + ] + if constants.ENABLE_DB_RESET: + top_buttons.append( + orm.ButtonElement( + label=":bomb: Reset Database", + action=actions.CONFIG_DB_RESET, + style="danger", + ), + ) + blocks.append(orm.ActionsBlock(elements=top_buttons)) + blocks.append(divider()) + # Compute hash for admin view so we can update cache after publish (manual or automatic) current_hash = _home_tab_content_hash(workspace_record) @@ -234,22 +239,6 @@ def build_home_tab( if constants.FEDERATION_ENABLED: _build_federation_section(blocks, workspace_record) - # ── Database Reset (dev tool) ───────────────────────────── - if constants.ENABLE_DB_RESET: - blocks.append(divider()) - blocks.append(section(":warning: *Danger Zone*")) - blocks.append(block_context("Reset the database to its initial state. _All data will be permanently lost._")) - blocks.append( - orm.ActionsBlock( - elements=[ - orm.ButtonElement( - label=":bomb: Reset Database", - action=actions.CONFIG_DB_RESET, - style="danger", - ), - ] - ) - ) block_dicts = orm.BlockView(blocks=blocks).as_form_field() if return_blocks: @@ -290,11 +279,11 @@ def _build_pending_invite_section( ws = helpers.get_workspace_by_id(member.workspace_id, context=context) inviter_names.append(helpers.resolve_workspace_name(ws) if ws else f"Workspace {member.workspace_id}") - from_label = f" from {', '.join(inviter_names)}" if inviter_names else "" + inviter_label = ", ".join(inviter_names) if inviter_names else "Another workspace" blocks.append(divider()) blocks.append( - section(f":envelope: *{group.name}*{from_label}\n_You've been invited to join this group_") + section(f":handshake: *{inviter_label}* has invited your workspace to join the group *{group.name}*.") ) blocks.append( orm.ActionsBlock( diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 7173023..83480ed 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -77,10 +77,9 
@@ def drop_and_init_db() -> None: url_no_db, connect_args = _build_base_url(include_schema=False) engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) - with engine_no_db.connect() as conn: + with engine_no_db.begin() as conn: conn.execute(text(f"DROP DATABASE IF EXISTS `{schema}`")) conn.execute(text(f"CREATE DATABASE `{schema}` CHARACTER SET utf8mb4")) - conn.commit() engine_no_db.dispose() @@ -106,11 +105,10 @@ def drop_and_init_db() -> None: combined = " ".join(lines) statements = [s.strip() for s in combined.split(";") if s.strip()] - with engine_with_db.connect() as conn: + with engine_with_db.begin() as conn: for stmt in statements: if stmt: conn.execute(text(stmt)) - conn.commit() engine_with_db.dispose() @@ -320,6 +318,23 @@ def create_record(record: BaseClass, schema=None) -> BaseClass: close_session(session) return record + @staticmethod + @_with_retry + def merge_record(record: BaseClass, schema=None) -> BaseClass: + """Insert or update a record based on its primary key.""" + session = get_session(schema=schema) + try: + merged = session.merge(record) + session.flush() + session.expunge(merged) + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + return merged + @staticmethod @_with_retry def create_records(records: list[BaseClass], schema=None): diff --git a/syncbot/handlers/__init__.py b/syncbot/handlers/__init__.py index a2fae12..76ca0f7 100644 --- a/syncbot/handlers/__init__.py +++ b/syncbot/handlers/__init__.py @@ -25,11 +25,11 @@ from handlers.export_import import ( handle_backup_download, handle_backup_restore, - handle_backup_restore_confirm_submit, + handle_backup_restore_proceed, handle_backup_restore_submit, handle_data_migration, - handle_data_migration_confirm_submit, handle_data_migration_export, + handle_data_migration_proceed, handle_data_migration_submit, ) from handlers.federation_cmds import ( @@ -63,7 +63,7 @@ check_join_sync_channel, 
handle_app_home_opened, handle_db_reset, - handle_db_reset_confirm, + handle_db_reset_proceed, handle_join_sync_submission, handle_member_joined_channel, handle_new_sync_submission, @@ -91,14 +91,14 @@ "handle_app_home_opened", "handle_backup_download", "handle_backup_restore", - "handle_backup_restore_confirm_submit", + "handle_backup_restore_proceed", "handle_backup_restore_submit", "handle_data_migration", - "handle_data_migration_confirm_submit", + "handle_data_migration_proceed", "handle_data_migration_export", "handle_data_migration_submit", "handle_db_reset", - "handle_db_reset_confirm", + "handle_db_reset_proceed", "handle_accept_group_invite", "handle_create_group", "handle_create_group_submit", diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index d31a776..0f30939 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -467,6 +467,12 @@ def _toggle_sync_status( channel_ws = ws_cache.get(sync_channel.workspace_id) or helpers.get_workspace_by_id(sync_channel.workspace_id) ws_cache[sync_channel.workspace_id] = channel_ws if channel_ws and channel_ws.bot_token: + ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) + if target_status == "active": + try: + ws_client.conversations_join(channel=sync_channel.channel_id) + except Exception: + pass name = admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label other_channels = [c for c in all_channels if c.workspace_id != sync_channel.workspace_id] if other_channels: @@ -476,7 +482,6 @@ def _toggle_sync_status( msg = f":{emoji}: *{name}* {verb} syncing with *{channel_ref}*." else: msg = f":{emoji}: *{name}* {verb} channel syncing." 
- ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg) except Exception as e: _logger.warning(f"Failed to notify channel {sync_channel.channel_id} about {verb}: {e}") diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index 6998a33..7ab3357 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -7,6 +7,7 @@ from slack_sdk.web import WebClient +import builders import constants import helpers from db import DbManager, schemas @@ -20,10 +21,17 @@ def _is_admin(client: WebClient, user_id: str, body: dict) -> bool: return helpers.is_user_authorized(client, user_id) +def _open_dm_channel(client: WebClient, user_id: str) -> str: + """Open (or reopen) a DM with *user_id* and return the channel ID.""" + resp = client.conversations_open(users=[user_id]) + return resp["channel"]["id"] + + # --------------------------------------------------------------------------- # Backup/Restore # --------------------------------------------------------------------------- + def handle_backup_restore( body: dict, client: WebClient, @@ -40,32 +48,38 @@ def handle_backup_restore( from slack import orm - blocks = [ - orm.SectionBlock(label="*Download backup*\nGenerate a full-instance backup (JSON) and receive it in your DM."), + download_blocks = [ + orm.SectionBlock(label="*Backup*\nSend a JSON backup file as a SyncBot DM."), orm.ActionsBlock( elements=[ orm.ButtonElement( - label=":floppy_disk: Download backup", + label=":floppy_disk: Send Backup File", action=actions.CONFIG_BACKUP_DOWNLOAD, ), ], ), orm.DividerBlock(), orm.SectionBlock( - label="*Restore from backup*\nPaste the backup JSON below. 
You will be asked to confirm if the encryption key or integrity check does not match.", - ), - orm.InputBlock( - label="Backup JSON", - action=actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, - element=orm.PlainTextInputElement( - placeholder='Paste backup JSON here (e.g. {"version": 1, ...})', - multiline=True, - max_length=3000, - ), + label="*Restore*\nUpload a JSON backup file. The integrity of the file will be checked.", ), ] - view = orm.BlockView(blocks=blocks) + restore_block = { + "type": "input", + "block_id": actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, + "label": {"type": "plain_text", "text": " "}, + "element": { + "type": "file_input", + "action_id": actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, + "filetypes": ["json"], + "max_files": 1, + }, + } + + view = orm.BlockView(blocks=download_blocks) + modal_blocks = view.as_form_field() + modal_blocks.append(restore_block) + client.views_open( trigger_id=trigger_id, view={ @@ -74,7 +88,7 @@ def handle_backup_restore( "title": {"type": "plain_text", "text": "Backup / Restore"}, "submit": {"type": "plain_text", "text": "Restore"}, "close": {"type": "plain_text", "text": "Cancel"}, - "blocks": view.as_form_field(), + "blocks": modal_blocks, }, ) @@ -92,22 +106,37 @@ def handle_backup_download( try: payload = ei.build_full_backup() json_str = json.dumps(payload, default=ei._json_serializer, indent=2) - client.files_upload( + dm_channel = _open_dm_channel(client, user_id) + client.files_upload_v2( content=json_str, filename=f"syncbot-backup-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", - channels=user_id, + channel=dm_channel, initial_comment="Your SyncBot full-instance backup. 
Keep this file secure.", ) except Exception as e: _logger.exception("backup_download failed: %s", e) return - # Optionally update the modal to say "Backup sent to your DM" - response_url = helpers.safe_get(body, "response_url") - if response_url: + + view_id = helpers.safe_get(body, "view", "id") + if view_id: try: - from slack_sdk.webhook import WebhookClient - w = WebhookClient(response_url) - w.send(text=":white_check_mark: Backup sent to your DM.") + client.views_update( + view_id=view_id, + view={ + "type": "modal", + "title": {"type": "plain_text", "text": "Backup / Restore"}, + "close": {"type": "plain_text", "text": "Close"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":white_check_mark: *Backup Sent!*\n\nCheck your SyncBot DMs to download the backup file.", + }, + }, + ], + }, + ) except Exception: pass @@ -124,22 +153,53 @@ def handle_backup_restore_submit( return None values = helpers.safe_get(body, "view", "state", "values") or {} - json_text = "" - for _block_id, block_data in values.items(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: - json_text = (action_data.get("value") or "").strip() + file_data = helpers.safe_get( + values, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT + ) + files = file_data.get("files") if file_data else None - if not json_text: - return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Paste backup JSON to restore."}} + if not files: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Upload a JSON backup file to restore."}, + } + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Could not retrieve the uploaded file."}, + } + + try: + 
import urllib.request + + req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {client.token}"}) + with urllib.request.urlopen(req) as resp: + json_text = resp.read().decode("utf-8") + except Exception as e: + _logger.exception("backup_restore: failed to download uploaded file: %s", e) + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Failed to download the uploaded file."}, + } try: data = json.loads(json_text) except json.JSONDecodeError as e: - return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Invalid JSON: {e}"}} + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Invalid JSON in uploaded file: {e}"}, + } if data.get("version") != ei.BACKUP_VERSION: - return {"response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Unsupported backup version (expected {ei.BACKUP_VERSION})."}} + return { + "response_action": "errors", + "errors": { + actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Unsupported backup version (expected {ei.BACKUP_VERSION})." 
+ }, + } hmac_ok = ei.verify_backup_hmac(data) key_ok = ei.verify_backup_encryption_key(data) @@ -147,75 +207,98 @@ def handle_backup_restore_submit( # If warnings needed, store payload in cache and show confirmation modal if not hmac_ok or not key_ok: from helpers._cache import _cache_set + cache_key = f"restore_pending:{user_id}" _cache_set(cache_key, data, ttl=600) return { "response_action": "push", "view": { "type": "modal", - "callback_id": actions.CONFIG_BACKUP_RESTORE_CONFIRM, - "title": {"type": "plain_text", "text": "Confirm restore"}, - "submit": {"type": "plain_text", "text": "Proceed anyway"}, + "title": {"type": "plain_text", "text": "Confirm Restore"}, "close": {"type": "plain_text", "text": "Cancel"}, - "private_metadata": user_id, "blocks": [ { "type": "section", "text": { "type": "mrkdwn", "text": ( - ("*Integrity check failed.* The file may have been modified or could be malicious. Only proceed if you intentionally edited the file.\n\n" if not hmac_ok else "") - + ("*Encryption key mismatch.* Restored bot tokens will not be usable; workspaces must reinstall the app to re-authorize.\n\n" if not key_ok else "") - + "Do you want to proceed with restore anyway?" + ( + "*WARNING: Integrity Check Failed!* The file has been tampered with. Only proceed if you intentionally edited the file.\n\n" + if not hmac_ok + else "" + ) + + ( + "*WARNING: Encryption Key Mismatch!* Restored bot tokens will not be usable. Workspaces will have to reinstall the app.\n\n" + if not key_ok + else "" + ) + + "Do you want to proceed with the restore anyway?" 
), }, }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Proceed Anyway"}, + "style": "danger", + "action_id": actions.CONFIG_BACKUP_RESTORE_PROCEED, + "value": user_id, + }, + ], + }, ], }, } + context["ack"]() _do_restore(data, client, user_id) return None -def handle_backup_restore_confirm_submit( +def handle_backup_restore_proceed( body: dict, client: WebClient, logger: Logger, context: dict, -) -> dict | None: - """Second-step restore when user confirmed warnings.""" +) -> None: + """Proceed with restore after user clicked the danger button despite warnings.""" user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): - return None - private_metadata = (helpers.safe_get(body, "view", "private_metadata") or "").strip() - if not private_metadata: - return {"response_action": "errors", "errors": {"": "Missing state."}} + return from helpers._cache import _cache_get - data = _cache_get(f"restore_pending:{private_metadata}") + + data = _cache_get(f"restore_pending:{user_id}") if not data: - return {"response_action": "errors", "errors": {"": "Restore data expired. 
Please paste the backup JSON again and submit."}} + _logger.warning("backup_restore_proceed: restore data expired for user %s", user_id) + return _do_restore(data, client, user_id) - return None def _do_restore(data: dict, client: WebClient, user_id: str) -> None: - """Run restore and invalidate caches.""" + """Run restore, invalidate caches, and refresh the Home tab for all restored workspaces.""" try: team_ids = ei.restore_full_backup(data, skip_hmac_check=True, skip_encryption_key_check=True) ei.invalidate_home_tab_caches_for_all_teams(team_ids) except Exception as e: _logger.exception("restore failed: %s", e) raise - # Refresh home for user - team_id = helpers.safe_get(client, "team_id") # not on client - # We don't have team_id here easily; the next time user opens Home they'll get fresh data due to cache clear. + + for tid in team_ids: + ws = DbManager.find_records(schemas.Workspace, [schemas.Workspace.team_id == tid]) + if ws: + try: + builders.refresh_home_tab_for_workspace(ws[0], _logger) + except Exception as e: + _logger.warning("_do_restore: failed to refresh home tab for %s: %s", tid, e) # --------------------------------------------------------------------------- # Data Migration # --------------------------------------------------------------------------- + def handle_data_migration( body: dict, client: WebClient, @@ -234,7 +317,7 @@ def handle_data_migration( from slack import orm - blocks = [ + export_blocks = [ orm.SectionBlock( label="*Export*\nDownload your workspace data for migration to another instance. You will receive a JSON file in your DM.", ), @@ -248,20 +331,26 @@ def handle_data_migration( ), orm.DividerBlock(), orm.SectionBlock( - label="*Import*\nPaste a migration file JSON below. Existing sync channels in the federated group will be replaced.", - ), - orm.InputBlock( - label="Migration JSON", - action=actions.CONFIG_DATA_MIGRATION_JSON_INPUT, - element=orm.PlainTextInputElement( - placeholder='Paste migration JSON here (e.g. 
{"version": 1, "workspace": {...}, ...})', - multiline=True, - max_length=3000, - ), + label="*Import*\nUpload a migration JSON file. Existing sync channels in the federated group will be replaced.", ), ] - view = orm.BlockView(blocks=blocks) + import_block = { + "type": "input", + "block_id": actions.CONFIG_DATA_MIGRATION_JSON_INPUT, + "label": {"type": "plain_text", "text": " "}, + "element": { + "type": "file_input", + "action_id": actions.CONFIG_DATA_MIGRATION_JSON_INPUT, + "filetypes": ["json"], + "max_files": 1, + }, + } + + view = orm.BlockView(blocks=export_blocks) + modal_blocks = view.as_form_field() + modal_blocks.append(import_block) + client.views_open( trigger_id=trigger_id, view={ @@ -270,7 +359,7 @@ def handle_data_migration( "title": {"type": "plain_text", "text": "Data Migration"}, "submit": {"type": "plain_text", "text": "Import"}, "close": {"type": "plain_text", "text": "Cancel"}, - "blocks": view.as_form_field(), + "blocks": modal_blocks, }, ) @@ -294,10 +383,11 @@ def handle_data_migration_export( try: payload = ei.build_migration_export(workspace_record.id, include_source_instance=True) json_str = json.dumps(payload, default=ei._json_serializer, indent=2) - client.files_upload( + dm_channel = _open_dm_channel(client, user_id) + client.files_upload_v2( content=json_str, filename=f"syncbot-migration-{workspace_record.team_id}-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", - channels=user_id, + channel=dm_channel, initial_comment="Your SyncBot workspace migration file. 
Use it on the new instance after connecting via federation.", ) except Exception as e: @@ -319,31 +409,70 @@ def handle_data_migration_submit( return None values = helpers.safe_get(body, "view", "state", "values") or {} - json_text = "" - for _block_id, block_data in values.items(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_DATA_MIGRATION_JSON_INPUT: - json_text = (action_data.get("value") or "").strip() + file_data = helpers.safe_get( + values, actions.CONFIG_DATA_MIGRATION_JSON_INPUT, actions.CONFIG_DATA_MIGRATION_JSON_INPUT + ) + files = file_data.get("files") if file_data else None - if not json_text: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Paste migration JSON to import."}} + if not files: + return { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Upload a migration JSON file to import."}, + } + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Could not retrieve the uploaded file."}, + } + + try: + import urllib.request + + req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {client.token}"}) + with urllib.request.urlopen(req) as resp: + json_text = resp.read().decode("utf-8") + except Exception as e: + _logger.exception("data_migration_submit: failed to download uploaded file: %s", e) + return { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Failed to download the uploaded file."}, + } try: data = json.loads(json_text) except json.JSONDecodeError as e: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON: {e}"}} + return { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON in uploaded file: 
{e}"}, + } if data.get("version") != ei.MIGRATION_VERSION: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})."}} + return { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})." + }, + } workspace_payload = data.get("workspace", {}) export_team_id = workspace_payload.get("team_id") if not export_team_id: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}} + return { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}, + } workspace_record = helpers.get_workspace_record(team_id, body, context, client) if not workspace_record or workspace_record.team_id != export_team_id: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration file is for a different workspace. Open the app from the workspace that matches the migration file."}} + return { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration file is for a different workspace. Open the app from the workspace that matches the migration file." 
+ }, + } # Build team_id -> workspace_id on B team_id_to_workspace_id = {workspace_record.team_id: workspace_record.id} @@ -357,6 +486,7 @@ def handle_data_migration_submit( if source and source.get("connection_code"): import secrets from federation import core as federation + result = federation.initiate_federation_connect( source["webhook_url"], source["connection_code"], @@ -395,20 +525,24 @@ def handle_data_migration_submit( created_by_workspace_id=workspace_record.id, ) DbManager.create_record(new_group) - DbManager.create_record(schemas.WorkspaceGroupMember( - group_id=new_group.id, - workspace_id=workspace_record.id, - status="active", - role="creator", - joined_at=now, - )) - DbManager.create_record(schemas.WorkspaceGroupMember( - group_id=new_group.id, - federated_workspace_id=fed_ws.id, - status="active", - role="member", - joined_at=now, - )) + DbManager.create_record( + schemas.WorkspaceGroupMember( + group_id=new_group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + ) + DbManager.create_record( + schemas.WorkspaceGroupMember( + group_id=new_group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + ) + ) # Resolve federated group (W + connection to source instance) my_groups = helpers.get_groups_for_workspace(workspace_record.id) @@ -424,28 +558,35 @@ def handle_data_migration_submit( candidate_groups = [fm.group_id for fm in fed_members if fm.group_id in my_group_ids] group_id = candidate_groups[0] if candidate_groups else None if not group_id: - return {"response_action": "errors", "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import."}} + return { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import." 
+ }, + } sig_ok = ei.verify_migration_signature(data) if not sig_ok and source: # Store in cache and show confirmation modal (private_metadata size limit) from helpers._cache import _cache_set + cache_key = f"migration_import_pending:{user_id}" - _cache_set(cache_key, { - "data": data, - "group_id": group_id, - "workspace_id": workspace_record.id, - "team_id_to_workspace_id": team_id_to_workspace_id, - }, ttl=600) + _cache_set( + cache_key, + { + "data": data, + "group_id": group_id, + "workspace_id": workspace_record.id, + "team_id_to_workspace_id": team_id_to_workspace_id, + }, + ttl=600, + ) return { "response_action": "push", "view": { "type": "modal", - "callback_id": actions.CONFIG_DATA_MIGRATION_CONFIRM, - "title": {"type": "plain_text", "text": "Confirm import"}, - "submit": {"type": "plain_text", "text": "Proceed anyway"}, + "title": {"type": "plain_text", "text": "Confirm Import"}, "close": {"type": "plain_text", "text": "Cancel"}, - "private_metadata": user_id, "blocks": [ { "type": "section", @@ -454,10 +595,23 @@ def handle_data_migration_submit( "text": "*Integrity check failed.* The file may have been modified or could be malicious. 
Only proceed if you intentionally edited the file.\n\nProceed with import anyway?", }, }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Proceed Anyway"}, + "style": "danger", + "action_id": actions.CONFIG_DATA_MIGRATION_PROCEED, + "value": user_id, + }, + ], + }, ], }, } + context["ack"]() ei.import_migration_data( data, workspace_record.id, @@ -468,35 +622,34 @@ def handle_data_migration_submit( return None -def handle_data_migration_confirm_submit( +def handle_data_migration_proceed( body: dict, client: WebClient, logger: Logger, context: dict, -) -> dict | None: - """Second-step import when user confirmed tampering warning.""" +) -> None: + """Proceed with import after user clicked the danger button despite warnings.""" if not constants.FEDERATION_ENABLED: - return None + return user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): - return None - private_metadata = (helpers.safe_get(body, "view", "private_metadata") or "").strip() - if not private_metadata: - return {"response_action": "errors", "errors": {"": "Missing state."}} + return from helpers._cache import _cache_get - meta = _cache_get(f"migration_import_pending:{private_metadata}") + + meta = _cache_get(f"migration_import_pending:{user_id}") if not meta: - return {"response_action": "errors", "errors": {"": "Import data expired. 
Please paste the migration JSON again and submit."}} + _logger.warning("data_migration_proceed: import data expired for user %s", user_id) + return data = meta.get("data") group_id = meta.get("group_id") workspace_id = meta.get("workspace_id") team_id_to_workspace_id = meta.get("team_id_to_workspace_id", {}) if not data or not group_id or not workspace_id: - return {"response_action": "errors", "errors": {"": "Missing import data."}} + return workspace_record = DbManager.get_record(schemas.Workspace, workspace_id) if not workspace_record: - return {"response_action": "errors", "errors": {"": "Workspace not found."}} + return ei.import_migration_data( data, @@ -505,4 +658,3 @@ def handle_data_migration_confirm_submit( team_id_to_workspace_id=team_id_to_workspace_id, ) ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) - return None diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 4195ddf..8a5daf0 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -157,6 +157,15 @@ def _handle_new_post( sync_records = helpers.get_sync_list(team_id, channel_id) if not sync_records: + any_sync_channel = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + if any_sync_channel: + return if user_id: try: client.chat_postMessage( @@ -581,11 +590,12 @@ def _handle_reaction( ws_name = helpers.resolve_workspace_name(source_ws) if source_ws else None posted_from = f"({ws_name})" if ws_name else "(via SyncBot)" + post_uuid = uuid.uuid4().hex + post_list: list[schemas.PostMeta] = [] + synced = 0 failed = 0 for post_meta, sync_channel, workspace in reacted_records: - if sync_channel.channel_id == channel_id: - continue try: if fed_ws and workspace.id != source_workspace_id: payload = federation.build_reaction_payload( @@ -611,23 +621,21 @@ def _handle_reaction( display_name = target_display_name or user_name or user_id or 
"Someone" permalink = None - is_thread_reply = False try: plink_resp = target_client.chat_getPermalink( channel=sync_channel.channel_id, message_ts=target_msg_ts, ) permalink = helpers.safe_get(plink_resp, "permalink") - is_thread_reply = permalink and "thread_ts=" in permalink except Exception: pass - if is_thread_reply and permalink: + if permalink: msg_text = f"reacted with :{reaction}: to <{permalink}|this message>" else: msg_text = f"reacted with :{reaction}:" - target_client.chat_postMessage( + resp = target_client.chat_postMessage( channel=sync_channel.channel_id, text=msg_text, username=f"{display_name} {posted_from}", @@ -636,11 +644,17 @@ def _handle_reaction( unfurl_links=False, unfurl_media=False, ) + ts = helpers.safe_get(resp, "ts") + if ts: + post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) synced += 1 except Exception as exc: failed += 1 _logger.error(f"Failed to sync reaction to channel {sync_channel.channel_id}: {exc}") + if post_list: + DbManager.create_records(post_list) + emit_metric("messages_synced", value=synced, sync_type="reaction_add") if failed: emit_metric("sync_failures", value=failed, sync_type="reaction_add") diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 708c732..94af8d0 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -343,8 +343,14 @@ def handle_member_joined_channel( if user_id != own_user_id: return - sync_records = helpers.get_sync_list(team_id, channel_id) - if sync_records: + any_sync_channel = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + if any_sync_channel: return try: @@ -405,6 +411,7 @@ def check_join_sync_channel( # Database Reset (gated by ENABLE_DB_RESET) # --------------------------------------------------------------------------- + def handle_db_reset( body: dict, client: WebClient, @@ -423,31 +430,42 @@ def 
handle_db_reset( if not trigger_id: return - modal_blocks = [ - orm.SectionBlock( - label=( - ":rotating_light: *This will permanently delete ALL data* :rotating_light:\n\n" - "Every workspace, group, channel sync, user mapping, and federation connection " - "in this database will be erased and the schema will be reinitialized from `init.sql`.\n\n" - "*This action cannot be undone.*" - ), - ).as_form_field(), - ] - client.views_open( trigger_id=trigger_id, view={ "type": "modal", - "callback_id": actions.CONFIG_DB_RESET_CONFIRM, "title": {"type": "plain_text", "text": "Reset Database?"}, - "submit": {"type": "plain_text", "text": "Yes, reset everything"}, "close": {"type": "plain_text", "text": "Cancel"}, - "blocks": modal_blocks, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ( + ":rotating_light: *This will permanently delete ALL data* :rotating_light:\n\n" + "Every workspace, group, channel sync, user mapping, and federation connection " + "in this database will be erased and the schema will be reinitialized from `init.sql`.\n\n" + "*This action cannot be undone.*" + ), + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Yes, Reset Everything!"}, + "style": "danger", + "action_id": actions.CONFIG_DB_RESET_PROCEED, + }, + ], + }, + ], }, ) -def handle_db_reset_confirm( +def handle_db_reset_proceed( body: dict, client: WebClient, logger: Logger, @@ -467,6 +485,7 @@ def handle_db_reset_confirm( ) from db import drop_and_init_db + drop_and_init_db() helpers.clear_all_caches() diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py index 6ecffe9..9d1f611 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -181,7 +181,7 @@ def restore_full_backup( else: kwargs[k] = v rec = cls(**kwargs) - DbManager.create_record(rec) + DbManager.merge_record(rec) if table_name == "workspaces" and rec.team_id: 
team_ids.append(rec.team_id) return team_ids diff --git a/syncbot/routing.py b/syncbot/routing.py index 439b614..6eed870 100644 --- a/syncbot/routing.py +++ b/syncbot/routing.py @@ -37,9 +37,12 @@ actions.CONFIG_REFRESH_HOME: handlers.handle_refresh_home, actions.CONFIG_BACKUP_RESTORE: handlers.handle_backup_restore, actions.CONFIG_BACKUP_DOWNLOAD: handlers.handle_backup_download, + actions.CONFIG_BACKUP_RESTORE_PROCEED: handlers.handle_backup_restore_proceed, actions.CONFIG_DATA_MIGRATION: handlers.handle_data_migration, actions.CONFIG_DATA_MIGRATION_EXPORT: handlers.handle_data_migration_export, + actions.CONFIG_DATA_MIGRATION_PROCEED: handlers.handle_data_migration_proceed, actions.CONFIG_DB_RESET: handlers.handle_db_reset, + actions.CONFIG_DB_RESET_PROCEED: handlers.handle_db_reset_proceed, actions.CONFIG_GENERATE_FEDERATION_CODE: handlers.handle_generate_federation_code, actions.CONFIG_ENTER_FEDERATION_CODE: handlers.handle_enter_federation_code, actions.CONFIG_REMOVE_FEDERATION_CONNECTION: handlers.handle_remove_federation_connection, @@ -73,10 +76,7 @@ actions.CONFIG_FEDERATION_CODE_SUBMIT: handlers.handle_federation_code_submit, actions.CONFIG_FEDERATION_LABEL_SUBMIT: handlers.handle_federation_label_submit, actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit, - actions.CONFIG_BACKUP_RESTORE_CONFIRM: handlers.handle_backup_restore_confirm_submit, actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit, - actions.CONFIG_DATA_MIGRATION_CONFIRM: handlers.handle_data_migration_confirm_submit, - actions.CONFIG_DB_RESET_CONFIRM: handlers.handle_db_reset_confirm, } """View submission ``callback_id`` -> handler.""" diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py index 1a18c52..9dd9457 100644 --- a/syncbot/slack/actions.py +++ b/syncbot/slack/actions.py @@ -159,6 +159,9 @@ CONFIG_BACKUP_RESTORE_CONFIRM = "backup_restore_confirm" """Callback: Confirm restore when HMAC or encryption key mismatch.""" 
+CONFIG_BACKUP_RESTORE_PROCEED = "backup_restore_proceed" +"""Action: danger button to proceed with restore despite warnings.""" + CONFIG_BACKUP_DOWNLOAD = "backup_download" """Action: user clicked Download backup in Backup/Restore modal.""" @@ -174,6 +177,9 @@ CONFIG_DATA_MIGRATION_CONFIRM = "data_migration_confirm" """Callback: Confirm import when signature check failed.""" +CONFIG_DATA_MIGRATION_PROCEED = "data_migration_proceed" +"""Action: danger button to proceed with import despite warnings.""" + CONFIG_DATA_MIGRATION_EXPORT = "data_migration_export" """Action: user clicked Export in Data Migration modal.""" @@ -213,4 +219,7 @@ """Action: user clicked "Reset Database" on the Home tab.""" CONFIG_DB_RESET_CONFIRM = "db_reset_confirm" + +CONFIG_DB_RESET_PROCEED = "db_reset_proceed" +"""Action: danger button to confirm database reset.""" """Callback: user confirmed database reset in the warning modal.""" From d09ac9605d49cd56002095f5d47b8b51e92785c4 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 13 Mar 2026 16:00:15 -0500 Subject: [PATCH 05/45] Removed S3 runtime dependency, improved verbiage and modal flows. Using Slack direct file upload for pictures and videos. Changed OAuth to use database instead of S3. General verbiage and modal cleanup. 
--- .env.example | 27 +--- ARCHITECTURE.md | 17 +-- IMPROVEMENTS.md | 22 +-- README.md | 9 +- db/init.sql | 60 ++++++++ docker-compose.yml | 2 - docs/DEPLOYMENT.md | 14 +- docs/USER_GUIDE.md | 2 +- pyproject.toml | 2 - syncbot/builders/__init__.py | 2 - syncbot/builders/channel_sync.py | 81 ++++++----- syncbot/builders/home.py | 223 ++++++++++++++++++------------ syncbot/builders/user_mapping.py | 45 ++++-- syncbot/constants.py | 29 ++-- syncbot/db/schemas.py | 8 +- syncbot/handlers/channel_sync.py | 148 +++++++++++--------- syncbot/handlers/export_import.py | 14 +- syncbot/handlers/groups.py | 78 +++++++---- syncbot/handlers/messages.py | 41 ++---- syncbot/handlers/sync.py | 72 +++++++--- syncbot/handlers/tokens.py | 2 +- syncbot/handlers/users.py | 7 +- syncbot/helpers/__init__.py | 6 +- syncbot/helpers/core.py | 25 +++- syncbot/helpers/export_import.py | 52 ++++++- syncbot/helpers/files.py | 75 ---------- syncbot/helpers/oauth.py | 75 +++++----- syncbot/helpers/refresh.py | 14 ++ syncbot/slack/actions.py | 2 +- syncbot/slack/forms.py | 18 +-- template.yaml | 156 +-------------------- tests/test_oauth.py | 77 +++++++++++ 32 files changed, 737 insertions(+), 668 deletions(-) create mode 100644 tests/test_oauth.py diff --git a/.env.example b/.env.example index e497051..f4a3e62 100644 --- a/.env.example +++ b/.env.example @@ -15,10 +15,10 @@ ADMIN_DATABASE_USER=root ADMIN_DATABASE_PASSWORD=rootpass ADMIN_DATABASE_SCHEMA=syncbot -# When true, a "Reset Database" button appears on the Home tab. -# Clicking it opens a confirmation modal, then drops and reinitializes the DB -# from db/init.sql. All data is lost. Only for local/dev use. -# ENABLE_DB_RESET=false +# When set to a Slack Team ID, the "Reset Database" button will be avaialbe +# on the Home tab for that team. Clicking it opens a confirmation modal, +# then drops and reinitializes the DB from db/init.sql. All data is lost. 
+# ENABLE_DB_RESET= # ----------------------------------------------------------------------------- # Local Development Mode @@ -36,8 +36,7 @@ ADMIN_DATABASE_SCHEMA=syncbot # ENV_SLACK_CLIENT_ID=your-client-id # ENV_SLACK_CLIENT_SECRET=your-client-secret # ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email -# ENV_SLACK_STATE_S3_BUCKET_NAME=syncbot-state -# ENV_SLACK_INSTALLATION_S3_BUCKET_NAME=syncbot-installations +# OAuth state and installation data are stored in the same MySQL database. # ----------------------------------------------------------------------------- # Encryption (optional) @@ -52,22 +51,6 @@ ADMIN_DATABASE_SCHEMA=syncbot # Set to "false" to allow all users to configure syncs (default: true). # REQUIRE_ADMIN=true -# ----------------------------------------------------------------------------- -# S3 File Storage (optional) -# ----------------------------------------------------------------------------- -# When S3_IMAGE_BUCKET is set, images are uploaded to S3 and referenced by URL. -# When not set, images and videos are re-uploaded directly to each synced -# Slack channel (no external storage required). -# -# AWS_ACCESS_KEY_ID=your-key -# AWS_SECRET_ACCESS_KEY=your-secret -# S3_IMAGE_BUCKET=syncbot-images -# S3_IMAGE_URL=https://syncbot-images.s3.amazonaws.com/ -# -# By default, videos are always posted directly to Slack to avoid large S3 -# storage costs. Set S3_VIDEO_ENABLED=true to store videos in S3 as well. 
-# S3_VIDEO_ENABLED=false - # ----------------------------------------------------------------------------- # Logging (optional) # ----------------------------------------------------------------------------- diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 1689921..1307d54 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -26,7 +26,6 @@ sequenceDiagram participant AG as API Gateway participant L as Lambda (SyncBot) participant DB as RDS MySQL - participant S3 as S3 (Images) participant SB as Slack API (Workspace B) U->>S: Posts message in #general @@ -37,10 +36,10 @@ sequenceDiagram L->>DB: Look up sync group for channel DB-->>L: SyncChannel + Workspace records - alt Message has images (streamed with size cap) - L->>S: Download image via URL - S-->>L: Image bytes (streaming) - L->>S3: Upload (with HEIC→PNG conversion) + alt Message has images or files (streamed with size cap) + L->>S: Download file via URL + S-->>L: File bytes (streaming) + L->>SB: files_upload_v2 (direct upload to each target channel) end L->>S: users.info (resolve sender) @@ -82,12 +81,6 @@ flowchart TB FED["federation/"] end - subgraph Storage["S3 Buckets"] - S1["OAuth State
(1-day TTL)"] - S2["Installations
(versioned)"] - S3["Images
(90-day TTL, public read)"] - end - subgraph Database["RDS MySQL"] T1["workspaces"] T2["workspace_groups"] @@ -114,8 +107,6 @@ flowchart TB HAND --> HELP HAND --> BUILD HELP --> FED - HELP --> S1 & S2 - HELP --> S3 HELP -->|SQLAlchemy
QueuePool + retry| Database EB -->|ScheduleV2| Lambda Lambda -.->|logs & metrics| Monitoring diff --git a/IMPROVEMENTS.md b/IMPROVEMENTS.md index 872e690..e47a6f4 100644 --- a/IMPROVEMENTS.md +++ b/IMPROVEMENTS.md @@ -10,7 +10,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Improved error handling** in database operations ### 2. Code Quality Improvements -- **Removed duplicate constant definitions** in `constants.py` (SLACK_STATE_S3_BUCKET_NAME, SLACK_INSTALLATION_S3_BUCKET_NAME, etc. were defined twice) +- **Removed duplicate constant definitions** in `constants.py` where env-var names were defined twice - **Fixed type hints**: - `get_request_type()` now correctly returns `tuple[str, str]` instead of `tuple[str]` - `apply_mentioned_users()` now correctly returns `str` instead of `List[Dict]` @@ -99,7 +99,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Connection pooling** reuses DB connections across invocations in warm Lambda containers ### 15. Infrastructure as Code -- **AWS SAM template** (`template.yaml`) defining complete VPC, RDS, S3, Lambda, API Gateway stack +- **AWS SAM template** (`template.yaml`) defining VPC, RDS, Lambda, API Gateway (SAM artifact S3 used for deploy packaging only) - **Free-tier optimized** (128 MB Lambda, db.t3.micro RDS, gp2 storage, no NAT Gateway) - **CI/CD pipeline** (`.github/workflows/sam-pipeline.yml`) for automated build/deploy - **SAM config** (`samconfig.toml`) for staging and production environments @@ -138,7 +138,7 @@ This document outlines the improvements made to the SyncBot application and addi ### 19. 
Architecture Diagrams (Low Priority - Completed) - **Added message sync flow sequence diagram** (Mermaid) to README showing the full request path from user message through API Gateway, Lambda, DB lookup, image upload, mention re-mapping, cross-workspace posting, and metric emission -- **Added AWS infrastructure diagram** (Mermaid) to README showing the relationships between API Gateway, Lambda, S3 buckets, RDS, EventBridge keep-warm, and CloudWatch monitoring +- **Added AWS infrastructure diagram** (Mermaid) to ARCHITECTURE.md showing API Gateway, Lambda, RDS, EventBridge keep-warm, and CloudWatch monitoring ### 20. Admin Authorization and Security Hardening (Completed) - **Added admin/owner authorization** — only workspace admins and owners can run `/config-syncbot` and all related configuration actions (create sync, join sync, remove sync) @@ -265,7 +265,7 @@ This document outlines the improvements made to the SyncBot application and addi - `GET /api/federation/ping` — health check / connectivity test - **Transparent message forwarding** — the core message handlers (`_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete`) detect whether a sync target is local or remote and dispatch accordingly — local channels are posted to directly, remote channels are forwarded via the federation webhook - **User directory exchange** — when a connection is established, both instances exchange their user directories so @mention resolution works across instances -- **Image handling** — images use existing S3 URLs which are publicly accessible; the receiving instance uses them directly in Slack blocks +- **Image handling** — images are forwarded as file uploads or public URLs; the receiving instance uses them in Slack blocks - **Retry with exponential backoff** — all outgoing federation HTTP calls retry up to 3 times with 1s/2s/4s backoff on transient failures (5xx, timeouts, connection errors) - **Home tab UI** — "External Connections" section 
on the Home tab with "Generate Connection Code" and "Enter Connection Code" buttons, active connection display with status and remove button, and pending code display with cancel button - **Connection label prompt** — generating a connection code prompts for a friendly name (e.g. "East Coast SyncBot") which is displayed on the Home tab and used as the remote workspace's display name @@ -289,12 +289,10 @@ This document outlines the improvements made to the SyncBot application and addi - **Slack GIF picker support** — GIFs sent via Slack's built-in `/giphy` picker or GIPHY integration are detected and synced - **Nested block parsing** — `_build_file_context` extracts `image_url` from nested `image` blocks within `attachments`, which is how Slack structures GIF picker messages - **Direct ImageBlock posting** — GIFs are always posted as `ImageBlock` elements via `chat.postMessage` using their public URLs, ensuring a proper message `ts` is captured for `PostMeta` (enabling reactions on GIFs) -- **No S3 required** — GIF URLs are already publicly accessible; no download or S3 upload needed +- **GIF sync** — GIF URLs are publicly accessible and posted as image blocks; no file download needed ### 32. 
Video & Image Direct Upload (Completed) -- **S3 is now optional** — images and videos can be synced without S3 by using Slack's `files_upload_v2` directly -- **`S3_IMAGE_BUCKET` defaults to empty** — when not set, all media is uploaded directly to target channels -- **`S3_VIDEO_ENABLED` env var** — when `true` and S3 is configured, videos are also stored in S3; when `false` (default), videos always use direct upload regardless of S3 configuration +- **Direct upload only** — images and videos are synced via Slack's `files_upload_v2` (no S3); media is downloaded from the source and uploaded to each target channel - **User attribution** — direct uploads include "Shared by User (Workspace)" in the `initial_comment` - **Fallback text** — `post_message` supports a `fallback_text` argument for messages that contain only blocks (no text), satisfying Slack's accessibility requirements @@ -365,7 +363,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Performance — `DbManager.count_records()`**: Added `SELECT COUNT(*)` method and replaced `len(find_records(...))` calls that were fetching all rows just to count them - **Performance — module-level constants**: Moved `_PREFIXED_ACTIONS` tuple to module scope (avoids rebuilding on every request); cached `GetDBClass` column keys in a class-level `frozenset` - **DoS — file download streaming**: All `requests.get` calls for files now use `stream=True` with 30s timeout, 8 KB chunks, and a 100 MB size cap -- **DoS — S3 client reuse**: `_get_s3_client()` creates the boto3 client once instead of per-file inside upload loops +- **Media path** — single direct-upload path (download from Slack, re-upload via `files_upload_v2`); no runtime S3 or boto3 - **DoS — input caps**: File attachments capped at 20 per event, mentions at 50 per message, federation user ingestion at 5,000 per request, federation images at 10 per message - **DoS — federation body limit**: Local dev federation HTTP server enforces 1 
MB max request body - **DoS — connection pool safety**: `GLOBAL_ENGINE.dispose()` now only fires after all retries are exhausted, not on every transient failure (prevents disrupting other in-flight queries) @@ -437,6 +435,12 @@ This document outlines the improvements made to the SyncBot application and addi - **README restructured** — Reduced README from ~580 lines to ~220 lines, keeping only install/deploy/run instructions. Moved end-user guide, backup/migration, CI/CD, shared infrastructure, and API reference into `docs/` folder (`USER_GUIDE.md`, `BACKUP_AND_MIGRATION.md`, `DEPLOYMENT.md`, `API_REFERENCE.md`). - **Documentation consistency** — Updated `IMPROVEMENTS.md` and all doc files to use new domain terminology (group members instead of partners). +### 47. OAuth on MySQL; Remove Runtime S3 and HEIC (Completed) +- **OAuth in RDS** — Slack OAuth state and installation data are stored in the same MySQL database via `SQLAlchemyInstallationStore` and `SQLAlchemyOAuthStateStore`. One code path for local dev and production; no file-based or S3-backed OAuth stores. +- **No runtime S3** — Removed all runtime S3 usage: OAuth buckets and image bucket resources, Lambda S3 policies, and env vars. Media is uploaded directly to each target Slack channel via `files_upload_v2`. SAM deploy still uses an S3 artifact bucket for packaging only. +- **HEIC and Pillow removed** — HEIC-to-PNG conversion and `upload_photos` (S3) were removed; direct upload is the only media path. Dropped `pillow` and `pillow-heif` from dependencies. +- **Template and docs** — `template.yaml` no longer creates OAuth or image buckets; README, DEPLOYMENT, ARCHITECTURE, USER_GUIDE, `.env.example`, and IMPROVEMENTS updated to describe MySQL OAuth and artifact-bucket-only S3. 
+ ## Remaining Recommendations ### Low Priority diff --git a/README.md b/README.md index 6e5db8c..dfbd88a 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,8 @@ SyncBot ships with a full AWS SAM template (`template.yaml`) that provisions eve | Compute | Lambda (128 MB) | 1M requests/month free | | API | API Gateway v1 | 1M calls/month free | | Database | RDS MySQL (db.t3.micro) | 750 hrs/month free (12 months) | -| Storage | S3 (3 buckets) | 5 GB free | + +OAuth and app data are stored in RDS. Media is uploaded directly to Slack (no runtime S3). SAM deploy uses an S3 artifact bucket for packaging only. ### Prerequisites @@ -227,10 +228,10 @@ See [`.env.example`](.env.example) for all available options with descriptions. | `ENV_SLACK_CLIENT_ID` | OAuth client ID | | `ENV_SLACK_CLIENT_SECRET` | OAuth client secret | | `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes | -| `ENV_SLACK_STATE_S3_BUCKET_NAME` | S3 bucket for OAuth state | -| `ENV_SLACK_INSTALLATION_S3_BUCKET_NAME` | S3 bucket for installations | | `PASSWORD_ENCRYPT_KEY` | Passphrase for Fernet bot-token encryption | +OAuth state and installation data are stored in the same RDS MySQL database. + ### Local Development Only | Variable | Description | @@ -243,8 +244,6 @@ See [`.env.example`](.env.example) for all available options with descriptions. 
| Variable | Default | Description | |----------|---------|-------------| | `REQUIRE_ADMIN` | `true` | Only admins/owners can configure syncs | -| `S3_IMAGE_BUCKET` | *(empty)* | S3 bucket for synced images | -| `S3_VIDEO_ENABLED` | `false` | Store videos in S3 (when bucket is set) | | `SOFT_DELETE_RETENTION_DAYS` | `30` | Days before soft-deleted data is purged | | `SYNCBOT_FEDERATION_ENABLED` | `false` | Enable External Connections | | `SYNCBOT_PUBLIC_URL` | *(none)* | Public URL for external connections | diff --git a/db/init.sql b/db/init.sql index bfad7e3..6575d53 100644 --- a/db/init.sql +++ b/db/init.sql @@ -53,11 +53,15 @@ CREATE TABLE IF NOT EXISTS workspace_group_members ( joined_at DATETIME DEFAULT NULL, deleted_at DATETIME DEFAULT NULL, dm_messages TEXT DEFAULT NULL, + invited_by_slack_user_id VARCHAR(32) DEFAULT NULL, + invited_by_workspace_id INT DEFAULT NULL, FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, FOREIGN KEY (federated_workspace_id) REFERENCES federated_workspaces(id) ON DELETE SET NULL, + FOREIGN KEY (invited_by_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL, UNIQUE KEY uq_group_workspace (group_id, workspace_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +-- If upgrading an existing DB, run: ALTER TABLE workspace_group_members ADD COLUMN invited_by_slack_user_id VARCHAR(32) DEFAULT NULL, ADD COLUMN invited_by_workspace_id INT DEFAULT NULL, ADD CONSTRAINT fk_wgm_invited_by FOREIGN KEY (invited_by_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL; CREATE TABLE IF NOT EXISTS syncs ( id INT AUTO_INCREMENT PRIMARY KEY, @@ -122,6 +126,60 @@ CREATE TABLE IF NOT EXISTS user_mappings ( UNIQUE KEY uq_source_target (source_workspace_id, source_user_id, target_workspace_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; +-- Slack OAuth persistence tables (for SQLAlchemyInstallationStore / SQLAlchemyOAuthStateStore) +CREATE TABLE IF NOT 
EXISTS slack_bots ( + id INT AUTO_INCREMENT PRIMARY KEY, + client_id VARCHAR(32) NOT NULL, + app_id VARCHAR(32) NOT NULL, + enterprise_id VARCHAR(32) DEFAULT NULL, + enterprise_name VARCHAR(200) DEFAULT NULL, + team_id VARCHAR(32) DEFAULT NULL, + team_name VARCHAR(200) DEFAULT NULL, + bot_token VARCHAR(200) DEFAULT NULL, + bot_id VARCHAR(32) DEFAULT NULL, + bot_user_id VARCHAR(32) DEFAULT NULL, + bot_scopes VARCHAR(1000) DEFAULT NULL, + bot_refresh_token VARCHAR(200) DEFAULT NULL, + bot_token_expires_at DATETIME DEFAULT NULL, + is_enterprise_install BOOLEAN NOT NULL DEFAULT FALSE, + installed_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS slack_installations ( + id INT AUTO_INCREMENT PRIMARY KEY, + client_id VARCHAR(32) NOT NULL, + app_id VARCHAR(32) NOT NULL, + enterprise_id VARCHAR(32) DEFAULT NULL, + enterprise_name VARCHAR(200) DEFAULT NULL, + enterprise_url VARCHAR(200) DEFAULT NULL, + team_id VARCHAR(32) DEFAULT NULL, + team_name VARCHAR(200) DEFAULT NULL, + bot_token VARCHAR(200) DEFAULT NULL, + bot_id VARCHAR(32) DEFAULT NULL, + bot_user_id VARCHAR(32) DEFAULT NULL, + bot_scopes VARCHAR(1000) DEFAULT NULL, + bot_refresh_token VARCHAR(200) DEFAULT NULL, + bot_token_expires_at DATETIME DEFAULT NULL, + user_id VARCHAR(32) NOT NULL, + user_token VARCHAR(200) DEFAULT NULL, + user_scopes VARCHAR(1000) DEFAULT NULL, + user_refresh_token VARCHAR(200) DEFAULT NULL, + user_token_expires_at DATETIME DEFAULT NULL, + incoming_webhook_url VARCHAR(200) DEFAULT NULL, + incoming_webhook_channel VARCHAR(200) DEFAULT NULL, + incoming_webhook_channel_id VARCHAR(200) DEFAULT NULL, + incoming_webhook_configuration_url VARCHAR(200) DEFAULT NULL, + is_enterprise_install BOOLEAN NOT NULL DEFAULT FALSE, + token_type VARCHAR(32) DEFAULT NULL, + installed_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +CREATE TABLE IF NOT EXISTS slack_oauth_states ( + id INT 
AUTO_INCREMENT PRIMARY KEY, + state VARCHAR(200) NOT NULL, + expire_at DATETIME NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + -- Indexes CREATE INDEX idx_sync_channels_channel_id ON sync_channels(channel_id); CREATE INDEX idx_sync_channels_sync_id ON sync_channels(sync_id); @@ -137,3 +195,5 @@ CREATE INDEX idx_groups_code ON workspace_groups(invite_code, status); CREATE INDEX idx_group_members_group ON workspace_group_members(group_id, status); CREATE INDEX idx_group_members_workspace ON workspace_group_members(workspace_id, status); CREATE INDEX idx_syncs_group ON syncs(group_id); +CREATE INDEX slack_bots_idx ON slack_bots(client_id, enterprise_id, team_id, installed_at); +CREATE INDEX slack_installations_idx ON slack_installations(client_id, enterprise_id, team_id, user_id, installed_at); diff --git a/docker-compose.yml b/docker-compose.yml index cbc2780..b93932d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -34,8 +34,6 @@ services: # Optional PASSWORD_ENCRYPT_KEY: ${PASSWORD_ENCRYPT_KEY:-123} REQUIRE_ADMIN: ${REQUIRE_ADMIN:-true} - AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-} - AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-} volumes: - ./syncbot:/app/syncbot diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 72f8d61..87e19ff 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -7,17 +7,13 @@ If you run multiple apps in the same AWS account, you can point SyncBot at exist | Parameter | What it skips | |-----------|---------------| | `ExistingDatabaseHost` | VPC, subnets, security groups, RDS instance | -| `ExistingSlackStateBucket` | Slack OAuth state S3 bucket | -| `ExistingInstallationBucket` | Slack installation data S3 bucket | -| `ExistingImagesBucket` | Synced-images S3 bucket | -Example — deploy with an existing RDS and images bucket: +OAuth and app data use RDS (MySQL); there are no runtime S3 buckets. 
Example — deploy with an existing RDS: ```bash sam deploy --guided \ --parameter-overrides \ - ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com \ - ExistingImagesBucket=my-shared-images-bucket + ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com ``` Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). Create the schema and initialize the tables on the existing instance: @@ -39,9 +35,9 @@ Pushes to `main` automatically build and deploy via `.github/workflows/sam-pipel ### One-Time Setup -1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3, IAM, and RDS. Generate an access key pair. +1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3 (for deploy artifacts only), IAM, and RDS. Generate an access key pair. -2. **Create a SAM deployment bucket** — SAM needs an S3 bucket to upload build artifacts during deploy: +2. **Create a SAM deployment bucket** — SAM uploads the Lambda package to an S3 bucket during deploy (packaging only; the app does not use S3 at runtime): ```bash aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 @@ -76,6 +72,6 @@ Once configured, merge or push to `main` and the pipeline runs: push to main → sam build → deploy to test → (manual approval) → deploy to prod ``` -Monitor progress in your repo's **Actions** tab. The first deploy creates the full CloudFormation stack (VPC, RDS, Lambda, API Gateway, S3 buckets). Subsequent deploys update only what changed. +Monitor progress in your repo's **Actions** tab. The first deploy creates the CloudFormation stack (VPC, RDS, Lambda, API Gateway). SAM uses the deployment bucket only for packaging; the app stores OAuth and data in RDS and uploads media directly to Slack. 
> **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index b5add34..7634d94 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -47,7 +47,7 @@ The Home tab and User Mapping screens have Refresh buttons. To keep API usage lo ## Media Sync -Images and videos are uploaded directly to target channels (or via S3 if configured). GIFs from the Slack GIF picker or GIPHY are synced as image blocks. +Images and videos are downloaded from the source and uploaded directly to each target channel. GIFs from the Slack GIF picker or GIPHY are synced as image blocks. ## External Connections diff --git a/pyproject.toml b/pyproject.toml index 575a87c..594ff14 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,8 +12,6 @@ sqlalchemy = "<2.0" pymysql = "^1.1.2" cryptography = "^46.0.0" requests = "^2.32.0" -pillow = "^12.0.0" -pillow-heif = "^1.2.0" [tool.poetry.group.dev.dependencies] diff --git a/syncbot/builders/__init__.py b/syncbot/builders/__init__.py index e47b0de..59acf13 100644 --- a/syncbot/builders/__init__.py +++ b/syncbot/builders/__init__.py @@ -14,7 +14,6 @@ _build_inline_channel_sync, ) from builders.home import ( - _REFRESH_BUTTON_BLOCK_INDEX, _home_tab_content_hash, build_home_tab, refresh_home_tab_for_workspace, @@ -34,7 +33,6 @@ "_get_group_members", "_get_groups_for_workspace", "_get_workspace_info", - "_REFRESH_BUTTON_BLOCK_INDEX", "_home_tab_content_hash", "build_home_tab", "build_join_sync_form", diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py index c30ca5a..303f44c 100644 --- a/syncbot/builders/channel_sync.py +++ b/syncbot/builders/channel_sync.py @@ -1,20 +1,18 @@ """Channel sync form builders.""" import logging -from logging import Logger - -from slack_sdk.web import WebClient import helpers from builders._common import ( - 
_deny_unauthorized, _format_channel_ref, ) from db import DbManager from db.schemas import PostMeta, Sync, SyncChannel, Workspace, WorkspaceGroup, WorkspaceGroupMember -from helpers import safe_get from slack import actions, orm -from slack.blocks import context as block_context, section +from slack.blocks import ( + context as block_context, + section, +) _logger = logging.getLogger(__name__) @@ -31,7 +29,7 @@ def _build_inline_channel_sync( Shows: - Active synced channels with Pause/Stop buttons - Paused synced channels with Resume/Stop buttons - - Channels waiting for a subscriber with Unpublish button + - Channels waiting for a subscriber with Stop Syncing button - Available channels from other members with Start Syncing button """ syncs_for_group = DbManager.find_records( @@ -39,8 +37,6 @@ def _build_inline_channel_sync( [Sync.group_id == group.id], ) - other_ws_ids = {m.workspace_id for m in other_members if m.workspace_id} - published_syncs: list[tuple[Sync, SyncChannel, list[SyncChannel], bool]] = [] waiting_syncs: list[tuple[Sync, SyncChannel]] = [] available_syncs: list[tuple[Sync, list[SyncChannel]]] = [] @@ -54,7 +50,7 @@ def _build_inline_channel_sync( other_channels = [c for c in channels if c.workspace_id != workspace_record.id] if my_channel and other_channels: - is_paused = my_channel.status == "paused" or any(c.status == "paused" for c in other_channels) + is_paused = my_channel.status == "paused" published_syncs.append((sync, my_channel, other_channels, is_paused)) elif my_channel and not other_channels: waiting_syncs.append((sync, my_channel)) @@ -70,24 +66,24 @@ def _build_inline_channel_sync( if not published_syncs and not waiting_syncs and not available_syncs: return - blocks.append(block_context("*Synced Channels*")) + blocks.append(section("*Synced Channels*")) for sync, my_ch, other_chs, is_paused in published_syncs: my_ref = _format_channel_ref(my_ch.channel_id, workspace_record, is_local=True) - # Workspace names for bracket: local 
first, then others + # Workspace names for bracket: local first, then others; append (Paused) per workspace that paused local_name = helpers.resolve_workspace_name(workspace_record) or f"Workspace {workspace_record.id}" + if my_ch.status == "paused": + local_name = f"{local_name} (Paused)" other_names: list[str] = [] for other_channel in other_chs: other_ws = helpers.get_workspace_by_id(other_channel.workspace_id, context=context) - other_names.append(helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}") + name = helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}" + if other_channel.status == "paused": + name = f"{name} (Paused)" + other_names.append(name) all_ws_names = [local_name] + other_names - if sync.sync_mode == "direct": - mode_tag = f" _[1-to-1: {', '.join(all_ws_names)}]_" if all_ws_names else "" - else: - mode_tag = f" _[Any: {', '.join(all_ws_names)}]_" if all_ws_names else "" - if is_paused: icon = ":double_vertical_bar:" toggle_btn = orm.ButtonElement( @@ -103,22 +99,37 @@ def _build_inline_channel_sync( value=str(sync.id), ) - blocks.append( - section(f"{icon} {my_ref}{mode_tag}") - ) + blocks.append(section(f"{icon} {my_ref}")) context_parts: list[str] = [] + if is_paused: + status_tag = "Paused" + else: + status_tag = "Active" + + context_parts.append(f"Status: `{status_tag}`") + + if sync.sync_mode == "direct": + mode_tag = "1-to-1" + else: + mode_tag = "Available to Any" + + context_parts.append(f"Type: `{mode_tag}`") + + if all_ws_names: + context_parts.append(f"Members: `{', '.join(all_ws_names)}`") + if getattr(my_ch, "created_at", None): - context_parts.append(f"Synced since: {my_ch.created_at:%B %d, %Y}") + context_parts.append(f"Synced Since: `{my_ch.created_at:%B %d, %Y}`") msg_count = DbManager.count_records( PostMeta, [PostMeta.sync_channel_id == my_ch.id], ) - context_parts.append(f"{msg_count} message{'s' if msg_count != 1 else ''} tracked") 
+ context_parts.append(f"Messages Tracked: `{msg_count}`") if context_parts: - blocks.append(block_context(" · ".join(context_parts))) + blocks.append(block_context("\n".join(context_parts))) blocks.append( orm.ActionsBlock( elements=[ @@ -134,16 +145,14 @@ def _build_inline_channel_sync( ) for sync, my_ch in waiting_syncs: - blocks.append( - section(f":outbox_tray: <#{my_ch.channel_id}> — _waiting for subscribers_") - ) + blocks.append(section(f":outbox_tray: <#{my_ch.channel_id}> — _waiting for subscribers_")) is_publisher = sync.publisher_workspace_id == workspace_record.id if is_publisher: blocks.append( orm.ActionsBlock( elements=[ orm.ButtonElement( - label="Unpublish", + label="Stop Syncing", action=f"{actions.CONFIG_UNPUBLISH_CHANNEL}_{my_ch.id}", value=str(sync.id), style="danger", @@ -154,18 +163,14 @@ def _build_inline_channel_sync( for sync, other_chs in available_syncs: publisher_ws = helpers.get_workspace_by_id(other_chs[0].workspace_id, context=context) if other_chs else None - publisher_name = helpers.resolve_workspace_name(publisher_ws) if publisher_ws else "another workspace" - sub_names_avail: list[str] = [] - for other_channel in other_chs: - other_ws = helpers.get_workspace_by_id(other_channel.workspace_id, context=context) - sub_names_avail.append(helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}") + publisher_name = helpers.resolve_workspace_name(publisher_ws) if publisher_ws else " another Workspace" if sync.sync_mode == "direct": - mode_tag = f" _[1-to-1: {sub_names_avail[0]}]_" if sub_names_avail else "" + mode_tag = "1-to-1" else: - mode_tag = f" _[Any: {', '.join(sub_names_avail)}]_" if sub_names_avail else "" - blocks.append( - section(f":inbox_tray: *{sync.title}* from {publisher_name}{mode_tag}") - ) + mode_tag = "Available to Any" + + blocks.append(section(":inbox_tray: New Sync Available")) + blocks.append(block_context(f"Type: `{mode_tag}`\nPublisher: `{publisher_name}`\nChannel 
Name: `{sync.title}`")) blocks.append( orm.ActionsBlock( elements=[ diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py index ff1712c..6f2e85b 100644 --- a/syncbot/builders/home.py +++ b/syncbot/builders/home.py @@ -27,22 +27,22 @@ WorkspaceGroupMember, ) from slack import actions, orm -from slack.blocks import context as block_context, divider, header, section +from slack.blocks import context as block_context +from slack.blocks import divider, header, section _logger = logging.getLogger(__name__) -# Index of the Actions block that contains the Refresh button (after header at 0) -_REFRESH_BUTTON_BLOCK_INDEX = 1 - def _home_tab_content_hash(workspace_record: Workspace) -> str: """Compute a stable hash of the data that drives the Home tab. Includes groups, members, syncs, sync channels (id/workspace/status), mapped counts, - and pending invite ids so the hash changes when anything visible on Home changes. + pending invite ids, and reset-button visibility so the hash changes when anything + visible on Home changes (including ENABLE_DB_RESET / team_id for the Reset button). 
""" workspace_id = workspace_record.id workspace_name = (workspace_record.workspace_name or "") or "" + reset_visible = helpers.is_db_reset_visible_for_workspace(workspace_record.team_id) my_groups = _get_groups_for_workspace(workspace_id) group_ids = sorted(g.id for g, _ in my_groups) pending_invites = DbManager.find_records( @@ -105,11 +105,16 @@ def _home_tab_content_hash(workspace_record: Workspace) -> str: ) member_sigs.append((ws_id, ch_count, mapped_count)) member_sigs.sort(key=lambda x: x[0]) - group_payload.append( - (group.id, len(members), len(syncs), tuple(sync_channel_tuples), tuple(member_sigs)) - ) + group_payload.append((group.id, len(members), len(syncs), tuple(sync_channel_tuples), tuple(member_sigs))) group_payload.sort(key=lambda x: x[0]) - payload = (workspace_id, workspace_name, tuple(group_ids), tuple(group_payload), pending_ids) + payload = ( + workspace_id, + workspace_name, + tuple(group_ids), + tuple(group_payload), + pending_ids, + reset_visible, + ) return hashlib.sha256(repr(payload).encode()).hexdigest() @@ -163,50 +168,28 @@ def build_home_tab( blocks: list[orm.BaseBlock] = [] if not is_admin: - blocks.append(block_context(":lock: Only workspace admins and owners can configure SyncBot.")) + blocks.append(block_context(":lock: Only Workspace Admins can configure SyncBot.")) block_dicts = orm.BlockView(blocks=blocks).as_form_field() if return_blocks: return block_dicts client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) return None - blocks.append(header("SyncBot Configuration")) - top_buttons = [ - orm.ButtonElement( - label=":arrows_counterclockwise: Refresh", - action=actions.CONFIG_REFRESH_HOME, - ), - orm.ButtonElement( - label=":floppy_disk: Backup/Restore", - action=actions.CONFIG_BACKUP_RESTORE, - ), - ] - if constants.ENABLE_DB_RESET: - top_buttons.append( - orm.ButtonElement( - label=":bomb: Reset Database", - action=actions.CONFIG_DB_RESET, - style="danger", - ), - ) - 
blocks.append(orm.ActionsBlock(elements=top_buttons)) - blocks.append(divider()) - # Compute hash for admin view so we can update cache after publish (manual or automatic) current_hash = _home_tab_content_hash(workspace_record) # ── Workspace Groups ────────────────────────────────────── - blocks.append(section(":busts_in_silhouette: *Workspace Groups*")) - blocks.append(block_context("Create or join groups to sync channels with other workspaces.")) + blocks.append(header("Workspace Groups")) + blocks.append(block_context("_Groups of Workspaces that can Sync Channels._")) blocks.append( orm.ActionsBlock( elements=[ orm.ButtonElement( - label=":heavy_plus_sign: Create Group", + label="Create Group", action=actions.CONFIG_CREATE_GROUP, ), orm.ButtonElement( - label=":link: Join Group", + label="Join Group", action=actions.CONFIG_JOIN_GROUP, ), ] @@ -226,7 +209,9 @@ def build_home_tab( if not my_groups and not pending_invites: blocks.append( - block_context("_You are not in any groups yet. Create a new group or enter an invite code to join one._") + block_context( + "You are not in any Workspace Groups yet. Create or join a Group before you can Sync Channels with other Workspaces." 
+ ) ) else: for group, my_membership in my_groups: @@ -239,6 +224,29 @@ def build_home_tab( if constants.FEDERATION_ENABLED: _build_federation_section(blocks, workspace_record) + # ── SyncBot Configuration ──────────────────── + blocks.append(block_context("\u200b")) + blocks.append(divider()) + blocks.append(header("SyncBot Configuration")) + config_buttons = [ + orm.ButtonElement( + label="Refresh", + action=actions.CONFIG_REFRESH_HOME, + ), + orm.ButtonElement( + label="Backup/Restore", + action=actions.CONFIG_BACKUP_RESTORE, + ), + ] + if helpers.is_db_reset_visible_for_workspace(workspace_record.team_id): + config_buttons.append( + orm.ButtonElement( + label=":bomb: Reset Database", + action=actions.CONFIG_DB_RESET, + style="danger", + ), + ) + blocks.append(orm.ActionsBlock(elements=config_buttons)) block_dicts = orm.BlockView(blocks=blocks).as_form_field() if return_blocks: @@ -273,18 +281,30 @@ def _build_pending_invite_section( WorkspaceGroupMember.deleted_at.is_(None), ], ) - inviter_names = [] + inviter_workspace_names = [] for member in inviting_members: if member.workspace_id: ws = helpers.get_workspace_by_id(member.workspace_id, context=context) - inviter_names.append(helpers.resolve_workspace_name(ws) if ws else f"Workspace {member.workspace_id}") - - inviter_label = ", ".join(inviter_names) if inviter_names else "Another workspace" + inviter_workspace_names.append( + helpers.resolve_workspace_name(ws) if ws else f"Workspace {member.workspace_id}" + ) + workspace_label = ", ".join(inviter_workspace_names) if inviter_workspace_names else "Another Workspace" + + inviter_label = workspace_label + if getattr(invite, "invited_by_slack_user_id", None) and getattr(invite, "invited_by_workspace_id", None): + inviter_ws = helpers.get_workspace_by_id(invite.invited_by_workspace_id, context=context) + if inviter_ws and inviter_ws.bot_token: + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(inviter_ws.bot_token)) + admin_name, _ = 
helpers.get_user_info(ws_client, invite.invited_by_slack_user_id) + if admin_name: + inviter_label = f"{admin_name} from {workspace_label}" + except Exception: + pass blocks.append(divider()) - blocks.append( - section(f":handshake: *{inviter_label}* has invited your workspace to join the group *{group.name}*.") - ) + blocks.append(header(f"{group.name}")) + blocks.append(section(f":punch: *{inviter_label}* has invited your Workspace to join this Group.")) blocks.append( orm.ActionsBlock( elements=[ @@ -318,11 +338,35 @@ def _build_group_section( all_members = _get_group_members(group.id) other_members = [member for member in all_members if member.workspace_id != workspace_record.id] - role_tag = " _(creator)_" if my_membership.role == "creator" else "" - icon = ":link:" if len(other_members) > 0 else ":handshake:" - label_text = f"{icon} *{group.name}*{role_tag}" + blocks.append(header(f"{group.name}")) - blocks.append(section(label_text)) + # Action buttons for this group + group_actions: list[orm.ButtonElement] = [ + orm.ButtonElement( + label="Invite Workspace", + action=actions.CONFIG_INVITE_WORKSPACE, + value=str(group.id), + ), + orm.ButtonElement( + label="Sync Channel", + action=actions.CONFIG_PUBLISH_CHANNEL, + value=str(group.id), + ), + orm.ButtonElement( + label="User Mapping", + action=actions.CONFIG_MANAGE_USER_MATCHING, + value=str(group.id), + ), + ] + group_actions.append( + orm.ButtonElement( + label="Leave Group", + action=f"{actions.CONFIG_LEAVE_GROUP}_{group.id}", + style="danger", + value=str(group.id), + ), + ) + blocks.append(orm.ActionsBlock(elements=group_actions)) syncs_for_group = DbManager.find_records(Sync, [Sync.group_id == group.id]) sync_ids = [s.id for s in syncs_for_group] @@ -331,8 +375,8 @@ def _build_group_section( if member.workspace_id: member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) name = helpers.resolve_workspace_name(member_ws) if member_ws else f"Workspace {member.workspace_id}" - if 
member.workspace_id == workspace_record.id: - name += " _(you)_" + if member.role == "creator": + name += " _(Group Creator)_" elif member.federated_workspace_id: fed_ws = DbManager.get_record(FederatedWorkspace, id=member.federated_workspace_id) name = f":globe_with_meridians: {fed_ws.name}" if fed_ws and fed_ws.name else "External" @@ -366,12 +410,25 @@ def _build_group_section( ) mapped_count = len(mapped) - stats = ( - f"Member since {joined_str}" - f" · {channel_count} synced channel{'s' if channel_count != 1 else ''}" - f" · {mapped_count} mapped user{'s' if mapped_count != 1 else ''}" - ) - blocks.append(block_context(f"*{name}*\n{stats}")) + stats = f"Member Since: `{joined_str}`\nSynced Channels: `{channel_count}`\nMapped Users: `{mapped_count}` " + text = f"*{name}*\n{stats}" + if member.workspace_id and member_ws: + ws_info = _get_workspace_info(member_ws) + icon_url = ws_info.get("icon_url") + if icon_url: + blocks.append( + orm.SectionBlock( + label=text, + element=orm.ImageAccessoryElement( + image_url=icon_url, + alt_text=name.split(" ")[0] if name else "Workspace", + ), + ) + ) + else: + blocks.append(block_context(text)) + else: + blocks.append(block_context(text)) pending_members = DbManager.find_records( WorkspaceGroupMember, @@ -382,12 +439,33 @@ def _build_group_section( ], ) for pending_member in pending_members: + pending_ws = None if pending_member.workspace_id: pending_ws = helpers.get_workspace_by_id(pending_member.workspace_id, context=context) - pname = helpers.resolve_workspace_name(pending_ws) if pending_ws else f"Workspace {pending_member.workspace_id}" + pname = ( + helpers.resolve_workspace_name(pending_ws) if pending_ws else f"Workspace {pending_member.workspace_id}" + ) else: pname = "Unknown" - blocks.append(block_context(f":hourglass_flowing_sand: *{pname}* — _Pending invite_")) + stats_pending = "Member Since: `Pending Invite`" + text_pending = f"*{pname}*\n{stats_pending}" + if pending_member.workspace_id and pending_ws: + 
ws_info = _get_workspace_info(pending_ws) + icon_url = ws_info.get("icon_url") + if icon_url: + blocks.append( + orm.SectionBlock( + label=text_pending, + element=orm.ImageAccessoryElement( + image_url=icon_url, + alt_text=pname.split(" ")[0] if pname else "Workspace", + ), + ) + ) + else: + blocks.append(block_context(text_pending)) + else: + blocks.append(block_context(text_pending)) blocks.append( orm.ActionsBlock( elements=[ @@ -395,39 +473,12 @@ def _build_group_section( label="Cancel Invite", action=f"{actions.CONFIG_CANCEL_GROUP_REQUEST}_{pending_member.id}", value=str(pending_member.id), + style="danger", ), ] ) ) - # Action buttons for this group - group_actions: list[orm.ButtonElement] = [ - orm.ButtonElement( - label="Invite Workspace", - action=actions.CONFIG_INVITE_WORKSPACE, - value=str(group.id), - ), - orm.ButtonElement( - label="Publish Channel", - action=actions.CONFIG_PUBLISH_CHANNEL, - value=str(group.id), - ), - orm.ButtonElement( - label="User Mapping", - action=actions.CONFIG_MANAGE_USER_MATCHING, - value=str(group.id), - ), - ] - group_actions.append( - orm.ButtonElement( - label="Leave Group", - action=f"{actions.CONFIG_LEAVE_GROUP}_{group.id}", - style="danger", - value=str(group.id), - ), - ) - blocks.append(orm.ActionsBlock(elements=group_actions)) - _build_inline_channel_sync(blocks, group, workspace_record, other_members, context) diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py index 2df42b4..f45ecfc 100644 --- a/syncbot/builders/user_mapping.py +++ b/syncbot/builders/user_mapping.py @@ -31,9 +31,7 @@ def _user_mapping_content_hash(workspace_record: Workspace, group_id: int | None gid = group_id or 0 if gid: members = _get_group_members(gid) - linked_workspace_ids = { - m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_id - } + linked_workspace_ids = {m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_id} else: my_groups = 
_get_groups_for_workspace(workspace_id) linked_workspace_ids = set() @@ -53,7 +51,11 @@ def _user_mapping_content_hash(workspace_record: Workspace, group_id: int | None ) all_mappings.extend(mappings) - payload = (workspace_id, gid, tuple((m.id, m.match_method, m.target_user_id) for m in sorted(all_mappings, key=lambda x: x.id))) + payload = ( + workspace_id, + gid, + tuple((m.id, m.match_method, m.target_user_id) for m in sorted(all_mappings, key=lambda x: x.id)), + ) return hashlib.sha256(repr(payload).encode()).hexdigest() @@ -181,12 +183,15 @@ def _user_context_block(mapping: UserMapping, label_text: str) -> orm.ContextBlo group_val = str(group_id) if group_id else "0" blocks: list[orm.BaseBlock] = [ - header(f"User Mapping — {group_name}"), + header(f"User Mapping for: {group_name}"), + block_context( + "_Users with the same email address between Workspaces will be mapped automatically. Other users can be mapped manually._" + ), blocks_actions( button(":arrow_left: Back", actions.CONFIG_USER_MAPPING_BACK, value=group_val), - button(":arrows_counterclockwise: Refresh", actions.CONFIG_USER_MAPPING_REFRESH, value=group_val), + button("Refresh", actions.CONFIG_USER_MAPPING_REFRESH, value=group_val), ), - block_context(f":busts_in_silhouette: *{len(soft_matched) + len(email_matched)} mapped* \u00b7 *{len(unmapped)} unmapped*"), + block_context(f"*Mapped: {len(soft_matched) + len(email_matched)}* \u00b7 *Unmapped: {len(unmapped)}*"), divider(), ] @@ -199,26 +204,33 @@ def _user_context_block(mapping: UserMapping, label_text: str) -> orm.ContextBlo blocks.append(divider()) if soft_matched: - blocks.append(section(":pencil2: *Soft / Manual Matches*")) + blocks.append(section("*Soft / Manual Matches*")) blocks.append(block_context("\u200b")) for m in soft_matched: method_tag = "manual" if m.match_method == "manual" else "name" - blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> _[{method_tag}]_")) + 
blocks.append( + _user_context_block( + m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> _[{method_tag}]_" + ) + ) blocks.append(blocks_actions(button("Edit", f"{actions.CONFIG_USER_MAPPING_EDIT}_{m.id}", value=group_val))) blocks.append(divider()) if email_matched: - blocks.append(section(":lock: *Email Matches*")) + blocks.append(section("*Email Matches*")) blocks.append(block_context("\u200b")) for m in email_matched: email_addr = _email_lookup.get((m.source_workspace_id, m.source_user_id), "") email_tag = f"_{email_addr}_" if email_addr else "_[email]_" - blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> {email_tag}")) + blocks.append( + _user_context_block( + m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> {email_tag}" + ) + ) blocks.append(divider()) if not unmapped and not soft_matched and not email_matched: - blocks.append(block_context("_No user mappings yet. Mappings are created automatically when " - "workspaces join a group and users share display names or emails._")) + blocks.append(block_context("_No users have been mapped in this Workspace Group yet._")) block_dicts = orm.BlockView(blocks=blocks).as_form_field() if return_blocks: @@ -333,8 +345,11 @@ def build_user_mapping_edit_modal( ) ) else: - blocks.append(block_context("_No available users to map to. All users in your workspace " - "are already mapped to other users._")) + blocks.append( + block_context( + "_No available users to map to. 
All users in your workspace are already mapped to other users._" + ) + ) meta = {"mapping_id": mapping_id, "group_id": group_id or 0} modal_form = orm.BlockView(blocks=blocks) diff --git a/syncbot/constants.py b/syncbot/constants.py index 4cfb64f..eb33b8b 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -19,8 +19,6 @@ # --------------------------------------------------------------------------- SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN" -SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME" -SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME" SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" SLACK_SCOPES = "ENV_SLACK_SCOPES" @@ -33,8 +31,8 @@ ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" -# When set to "true", a red "Reset Database" button appears on the Home tab. -ENABLE_DB_RESET = os.environ.get("ENABLE_DB_RESET", "false").lower() == "true" +# Make the "Reset Database" button appear on the Home tab in a specific Workspace. +ENABLE_DB_RESET = os.environ.get("ENABLE_DB_RESET", "").strip() LOCAL_DEVELOPMENT = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" @@ -51,8 +49,6 @@ def _has_real_bot_token() -> bool: WARNING_BLOCK = "WARNING_BLOCK" -MAX_HEIF_SIZE = 1000 - # --------------------------------------------------------------------------- # User-matching TTLs (seconds) # @@ -60,11 +56,11 @@ def _has_real_bot_token() -> bool: # Manual matches never expire and can only be removed via the admin UI. 
# --------------------------------------------------------------------------- -MATCH_TTL_EMAIL = 30 * 24 * 3600 # 30 days for email-confirmed matches -MATCH_TTL_NAME = 14 * 24 * 3600 # 14 days for name-based matches -MATCH_TTL_NONE = 90 * 24 * 3600 # 90 days for no-match (team_join handles re-checks) -USER_DIR_REFRESH_TTL = 24 * 3600 # 24 hours per workspace directory refresh -USER_MATCHING_PAGE_SIZE = 40 # max unmatched users shown in the modal +MATCH_TTL_EMAIL = 30 * 24 * 3600 # 30 days for email-confirmed matches +MATCH_TTL_NAME = 14 * 24 * 3600 # 14 days for name-based matches +MATCH_TTL_NONE = 90 * 24 * 3600 # 90 days for no-match (team_join handles re-checks) +USER_DIR_REFRESH_TTL = 24 * 3600 # 24 hours per workspace directory refresh +USER_MATCHING_PAGE_SIZE = 40 # max unmatched users shown in the modal # Refresh button cooldown (seconds) when content hash unchanged REFRESH_COOLDOWN_SECONDS = 60 @@ -79,13 +75,6 @@ def _has_real_bot_token() -> bool: SYNCBOT_PUBLIC_URL = "SYNCBOT_PUBLIC_URL" FEDERATION_ENABLED = os.environ.get("SYNCBOT_FEDERATION_ENABLED", "false").lower() == "true" -AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID" -AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY" -S3_IMAGE_BUCKET = os.environ.get("S3_IMAGE_BUCKET", "") -S3_IMAGE_URL = os.environ.get("S3_IMAGE_URL", f"https://{S3_IMAGE_BUCKET}.s3.amazonaws.com/" if S3_IMAGE_BUCKET else "") -S3_ENABLED = bool(S3_IMAGE_BUCKET) -S3_VIDEO_ENABLED = os.environ.get("S3_VIDEO_ENABLED", "false").lower() == "true" - # --------------------------------------------------------------------------- # Startup configuration validation @@ -102,14 +91,12 @@ def _has_real_bot_token() -> bool: ADMIN_DATABASE_SCHEMA, ] -# Required only in production (Lambda) +# Required only in production (Lambda). OAuth uses MySQL; no S3 buckets. 
_REQUIRED_PRODUCTION = [ SLACK_SIGNING_SECRET, SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, SLACK_SCOPES, - SLACK_STATE_S3_BUCKET_NAME, - SLACK_INSTALLATION_S3_BUCKET_NAME, PASSWORD_ENCRYPT_KEY, ] diff --git a/syncbot/db/schemas.py b/syncbot/db/schemas.py index 6bed678..c011f35 100644 --- a/syncbot/db/schemas.py +++ b/syncbot/db/schemas.py @@ -94,9 +94,15 @@ class WorkspaceGroupMember(BaseClass, GetDBClass): joined_at = Column(DateTime, nullable=True) deleted_at = Column(DateTime, nullable=True, default=None) dm_messages = Column(Text, nullable=True) + invited_by_slack_user_id = Column(String(32), nullable=True) + invited_by_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) group = relationship("WorkspaceGroup", backref="members") - workspace = relationship("Workspace", backref="group_memberships") + workspace = relationship( + "Workspace", + backref="group_memberships", + foreign_keys=[workspace_id], + ) def get_id(): return WorkspaceGroupMember.id diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 0f30939..034bcc9 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -1,5 +1,6 @@ """Channel sync handlers — publish, unpublish, subscribe, pause, resume, stop.""" +import contextlib import logging from datetime import UTC, datetime from logging import Logger @@ -12,7 +13,8 @@ from db import DbManager, schemas from handlers._common import _parse_private_metadata, _sanitize_text from slack import actions, orm -from slack.blocks import context as block_context, section +from slack.blocks import context as block_context +from slack.blocks import section _logger = logging.getLogger(__name__) @@ -74,22 +76,22 @@ def _build_publish_step2( channel_options = _get_publishable_channel_options(client, workspace_id) if not channel_options: channel_options = [ - orm.SelectorOption(name="— No channels available (all are already published or synced) —", value="__none__"), + orm.SelectorOption( + 
name="— No Channels available (all are already published or synced) —", value="__none__" + ), ] modal_blocks.append( orm.InputBlock( label="Channel to Publish", action=actions.CONFIG_PUBLISH_CHANNEL_SELECT, element=orm.StaticSelectElement( - placeholder="Select a channel to publish", + placeholder="Select a Channel to publish", options=channel_options, ), optional=False, ) ) - modal_blocks.append( - block_context("Select a channel from your workspace to make available for syncing.") - ) + modal_blocks.append(block_context("Select a Channel from your Workspace to make available for Syncing.")) if sync_mode == "direct" and other_members: ws_options: list[orm.SelectorOption] = [] @@ -104,7 +106,7 @@ def _build_publish_step2( label="Target Workspace", action=actions.CONFIG_PUBLISH_DIRECT_TARGET, element=orm.StaticSelectElement( - placeholder="Select target workspace", + placeholder="Select target Workspace", options=ws_options, ), optional=False, @@ -136,17 +138,17 @@ def handle_publish_channel( mode_options = [ orm.SelectorOption( - name="Available to entire group\nAny current or future member can subscribe", + name="Available to All Workspaces\nAny current or future Workspace Group Member can Sync.", value="group", ), orm.SelectorOption( - name="1-to-1 with a specific workspace\nOnly the selected workspace can subscribe", + name="Only with Specific Workspace\nChoose a specific Workspace Group Member to allow Syncing.", value="direct", ), ] step1_blocks: list[orm.BaseBlock] = [ orm.InputBlock( - label="Sync Mode", + label="Channel Sync Mode", action=actions.CONFIG_PUBLISH_SYNC_MODE, element=orm.RadioButtonsElement( initial_value="group", @@ -162,7 +164,7 @@ def handle_publish_channel( client=client, trigger_id=trigger_id, callback_id=actions.CONFIG_PUBLISH_MODE_SUBMIT, - title_text="Publish Channel", + title_text="Sync Channel", submit_button_text="Next", parent_metadata={"group_id": group_id}, new_or_add="new", @@ -200,10 +202,7 @@ def handle_publish_mode_submit( 
other_members = [] if workspace_record: group_members = _get_group_members(group_id) - other_members = [ - m for m in group_members - if m.workspace_id != workspace_record.id and m.workspace_id - ] + other_members = [m for m in group_members if m.workspace_id != workspace_record.id and m.workspace_id] if not workspace_record: _logger.warning("handle_publish_mode_submit: no workspace_record") @@ -211,7 +210,7 @@ def handle_publish_mode_submit( step2 = _build_publish_step2(client, group_id, sync_mode, other_members, workspace_record.id) updated_view = step2.as_ack_update( callback_id=actions.CONFIG_PUBLISH_CHANNEL_SUBMIT, - title_text="Publish Channel", + title_text="Sync Channel", submit_button_text="Publish", parent_metadata={"group_id": group_id, "sync_mode": sync_mode}, ) @@ -256,10 +255,8 @@ def handle_publish_channel_submit( if action_id == actions.CONFIG_PUBLISH_DIRECT_TARGET: selected_opt = helpers.safe_get(action_data, "selected_option", "value") if selected_opt: - try: + with contextlib.suppress(TypeError, ValueError): target_workspace_id = int(selected_opt) - except (TypeError, ValueError): - pass if sync_mode == "direct" and not target_workspace_id: sync_mode = "group" @@ -270,13 +267,15 @@ def handle_publish_channel_submit( for _block_id, block_data in state_values.items(): for action_id, action_data in block_data.items(): if action_id == actions.CONFIG_PUBLISH_CHANNEL_SELECT: - channel_id = action_data.get("selected_conversation") or action_data.get("selected_option", {}).get("value") + channel_id = action_data.get("selected_conversation") or action_data.get("selected_option", {}).get( + "value" + ) if not channel_id or channel_id == "__none__": if ack_fn: ack_fn( response_action="errors", - errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a channel to publish."}, + errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a Channel to publish."}, ) return @@ -292,7 +291,7 @@ def handle_publish_channel_submit( if ack_fn: ack_fn( 
response_action="errors", - errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This channel is already being synced."}, + errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This Channel is already being synced."}, ) return @@ -395,12 +394,14 @@ def handle_unpublish_channel( try: member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id) if member_ws and member_ws.bot_token: - name = admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + name = ( + admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + ) member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_synced_channels( member_client, [sync_channel.channel_id], - f":octagonal_sign: *{name}* unpublished this channel. Syncing is no longer available.", + f":octagonal_sign: *{name}* unpublished this Channel. Syncing is no longer available.", ) member_client.conversations_leave(channel=sync_channel.channel_id) except Exception as e: @@ -430,7 +431,7 @@ def _toggle_sync_status( verb: str, log_event: str, ) -> None: - """Shared logic for pausing or resuming a channel sync.""" + """Shared logic for pausing or resuming a channel sync. 
Only the current workspace's channel is toggled.""" action_id = helpers.safe_get(body, "actions", 0, "action_id") or "" sync_id_str = action_id.replace(action_prefix + "_", "") @@ -447,36 +448,51 @@ def _toggle_sync_status( or helpers.safe_get(body, "user", "team_id") ) workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if not workspace_record: + return admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record) all_channels = DbManager.find_records( schemas.SyncChannel, [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)], ) - - for sync_channel in all_channels: - DbManager.update_records( - schemas.SyncChannel, - [schemas.SyncChannel.id == sync_channel.id], - {schemas.SyncChannel.status: target_status}, + my_sync_channel = next( + (c for c in all_channels if c.workspace_id == workspace_record.id), + None, + ) + if not my_sync_channel: + _logger.warning( + f"{log_event}_no_channel_for_workspace", extra={"sync_id": sync_id, "workspace_id": workspace_record.id} ) + return + + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == my_sync_channel.id], + {schemas.SyncChannel.status: target_status}, + ) + helpers._cache_delete(f"sync_list:{my_sync_channel.channel_id}") ws_cache: dict[int, schemas.Workspace | None] = {} - for sync_channel in all_channels: + for sync_channel in [my_sync_channel]: try: - channel_ws = ws_cache.get(sync_channel.workspace_id) or helpers.get_workspace_by_id(sync_channel.workspace_id) + channel_ws = ws_cache.get(sync_channel.workspace_id) or helpers.get_workspace_by_id( + sync_channel.workspace_id + ) ws_cache[sync_channel.workspace_id] = channel_ws if channel_ws and channel_ws.bot_token: ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) if target_status == "active": - try: + with contextlib.suppress(Exception): ws_client.conversations_join(channel=sync_channel.channel_id) - except 
Exception: - pass - name = admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + name = ( + admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label + ) other_channels = [c for c in all_channels if c.workspace_id != sync_channel.workspace_id] if other_channels: - other_ws = ws_cache.get(other_channels[0].workspace_id) or helpers.get_workspace_by_id(other_channels[0].workspace_id) + other_ws = ws_cache.get(other_channels[0].workspace_id) or helpers.get_workspace_by_id( + other_channels[0].workspace_id + ) ws_cache[other_channels[0].workspace_id] = other_ws channel_ref = helpers.resolve_channel_name(other_channels[0].channel_id, other_ws) msg = f":{emoji}: *{name}* {verb} syncing with *{channel_ref}*." @@ -486,18 +502,23 @@ def _toggle_sync_status( except Exception as e: _logger.warning(f"Failed to notify channel {sync_channel.channel_id} about {verb}: {e}") - _logger.info(log_event, extra={"sync_id": sync_id, "channels": len(all_channels)}) + _logger.info(log_event, extra={"sync_id": sync_id, "sync_channel_id": my_sync_channel.id}) builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) sync_record = DbManager.get_record(schemas.Sync, id=sync_id) if sync_record and sync_record.group_id: - _refresh_group_member_homes(sync_record.group_id, workspace_record.id if workspace_record else 0, logger, context=context) + _refresh_group_member_homes( + sync_record.group_id, workspace_record.id if workspace_record else 0, logger, context=context + ) def handle_pause_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None: """Pause an active channel sync.""" _toggle_sync_status( - body, client, logger, context, + body, + client, + logger, + context, action_prefix=actions.CONFIG_PAUSE_SYNC, target_status="paused", emoji="double_vertical_bar", @@ -509,7 +530,10 @@ def handle_pause_sync(body: dict, client: WebClient, logger: Logger, context: di 
def handle_resume_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None: """Resume a paused channel sync.""" _toggle_sync_status( - body, client, logger, context, + body, + client, + logger, + context, action_prefix=actions.CONFIG_RESUME_SYNC, target_status="active", emoji="arrow_forward", @@ -541,12 +565,12 @@ def handle_stop_sync( confirm_form = orm.BlockView( blocks=[ section( - ":warning: *Are you sure you want to stop syncing this channel?*\n\n" + ":warning: *Are you sure you want to stop syncing this Channel?*\n\n" "This will:\n" - "\u2022 Remove your workspace's sync history for this channel\n" - "\u2022 Remove this channel from the active sync\n" - "\u2022 Other workspaces in the sync will continue uninterrupted\n\n" - "_No messages will be deleted from any channel — only SyncBot's tracking history for your workspace is removed._" + "\u2022 Remove your Workspace's Sync history for this Channel\n" + "\u2022 Remove this Channel from the active Sync\n" + "\u2022 Other Workspaces in the Sync will continue uninterrupted\n\n" + "_No messages will be deleted from any Channel — only SyncBot's tracking history for your Workspace is removed._" ), ] ) @@ -611,11 +635,11 @@ def handle_stop_sync_confirm( my_ref = ( helpers.resolve_channel_name(my_channel.channel_id, workspace_record) if my_channel - else "the other workspace" + else "the other Workspace" ) msg = f":octagonal_sign: *{admin_label}* stopped syncing with *{my_ref}*." else: - msg = f":octagonal_sign: *{admin_name}* stopped channel syncing." + msg = f":octagonal_sign: *{admin_name}* stopped Channel Syncing." 
ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token)) helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg) except Exception as e: @@ -683,14 +707,13 @@ def handle_subscribe_channel( pub_ch = publisher_channels[0] pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id) ch_ref = _format_channel_ref(pub_ch.channel_id, pub_ws, is_local=False) - blocks.append(section(f"Subscribing to: {ch_ref}")) + blocks.append(section(f"Syncing with: {ch_ref}")) channel_options = _get_publishable_channel_options(client, workspace_record.id) if not channel_options: channel_options = [ orm.SelectorOption( - name="— No channels available (all are already in a sync) —", - value="__none__", + name="— No Channels available to Sync in this Workspace. —", value="__none__" ), ] blocks.append( @@ -698,22 +721,20 @@ def handle_subscribe_channel( label="Channel for Sync", action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT, element=orm.StaticSelectElement( - placeholder="Select a channel to sync into", + placeholder="Select a Channel to Sync with.", options=channel_options, ), optional=False, ) ) - blocks.append( - block_context("Choose a channel in your workspace to receive synced messages.") - ) + blocks.append(block_context("Choose a Channel in your Workspace to start Syncing.")) orm.BlockView(blocks=blocks).post_modal( client=client, trigger_id=trigger_id, callback_id=actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT, - title_text="Subscribe to Channel", - submit_button_text="Subscribe", + title_text="Sync Channel", + submit_button_text="Sync Channel", parent_metadata={"sync_id": int(sync_id)} if sync_id else None, new_or_add="new", ) @@ -748,9 +769,8 @@ def handle_subscribe_channel_submit( for _block_id, block_data in state_values.items(): for action_id, action_data in block_data.items(): if action_id == actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT: - channel_id = ( - action_data.get("selected_conversation") - or helpers.safe_get(action_data, "selected_option", 
"value") + channel_id = action_data.get("selected_conversation") or helpers.safe_get( + action_data, "selected_option", "value" ) if not channel_id or channel_id == "__none__": @@ -792,10 +812,10 @@ def handle_subscribe_channel_submit( pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id) channel_ref = helpers.resolve_channel_name(pub_ch.channel_id, pub_ws) else: - channel_ref = sync_record.title or "the other channel" + channel_ref = sync_record.title or "the other Channel" client.chat_postMessage( channel=channel_id, - text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{channel_ref}*. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_name}* started syncing this Channel with *{channel_ref}*. Messages will be shared automatically.", ) except Exception as exc: _logger.debug(f"subscribe_channel: failed to notify subscriber channel {channel_id}: {exc}") @@ -808,7 +828,7 @@ def handle_subscribe_channel_submit( pub_client = WebClient(token=helpers.decrypt_bot_token(pub_ws.bot_token)) pub_client.chat_postMessage( channel=pub_ch.channel_id, - text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this channel. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this Channel. 
Messages will be shared automatically.", ) except Exception as exc: _logger.debug(f"subscribe_channel: failed to notify publisher channel {pub_ch.channel_id}: {exc}") diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index 7ab3357..8bd80f5 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -1,5 +1,6 @@ """Backup/Restore and Data Migration handlers (modals and submissions).""" +import contextlib import json import logging from datetime import UTC, datetime @@ -111,7 +112,7 @@ def handle_backup_download( content=json_str, filename=f"syncbot-backup-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", channel=dm_channel, - initial_comment="Your SyncBot full-instance backup. Keep this file secure.", + initial_comment=":nerd_face: Here is your SyncBot JSON backup. Keep this file secure.", ) except Exception as e: _logger.exception("backup_download failed: %s", e) @@ -119,7 +120,7 @@ def handle_backup_download( view_id = helpers.safe_get(body, "view", "id") if view_id: - try: + with contextlib.suppress(Exception): client.views_update( view_id=view_id, view={ @@ -137,8 +138,6 @@ def handle_backup_download( ], }, ) - except Exception: - pass def handle_backup_restore_submit( @@ -319,19 +318,19 @@ def handle_data_migration( export_blocks = [ orm.SectionBlock( - label="*Export*\nDownload your workspace data for migration to another instance. You will receive a JSON file in your DM.", + label="*Export*\nDownload your Workspace data for migration to another instance. You will receive a JSON file in your DM.", ), orm.ActionsBlock( elements=[ orm.ButtonElement( - label=":outbox_tray: Export my workspace data", + label=":outbox_tray: Export my Workspace data", action=actions.CONFIG_DATA_MIGRATION_EXPORT, ), ], ), orm.DividerBlock(), orm.SectionBlock( - label="*Import*\nUpload a migration JSON file. 
Existing sync channels in the federated group will be replaced.", + label="*Import*\nUpload a migration JSON file. Existing Sync Channels in the federated Group will be replaced.", ), ] @@ -485,6 +484,7 @@ def handle_data_migration_submit( source = data.get("source_instance") if source and source.get("connection_code"): import secrets + from federation import core as federation result = federation.initiate_federation_connect( diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index 4a79f15..4a1cb58 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -4,7 +4,7 @@ import logging import secrets import string -from datetime import UTC, datetime, timedelta +from datetime import UTC, datetime from logging import Logger from slack_sdk.web import WebClient @@ -13,7 +13,8 @@ import helpers from db import DbManager, schemas from slack import actions, forms, orm -from slack.blocks import context as block_context, divider, section +from slack.blocks import context as block_context +from slack.blocks import divider, section _logger = logging.getLogger(__name__) @@ -100,15 +101,14 @@ def handle_create_group( view = orm.BlockView( blocks=[ orm.InputBlock( - label="Group Name", + label="Workspace Group Name", action=actions.CONFIG_CREATE_GROUP_NAME, - element=orm.PlainTextInputElement(placeholder="e.g. East Coast AOs, Partner Org..."), + element=orm.PlainTextInputElement(placeholder="e.g. Slack Syndicate, The Multiverse..."), optional=False, ), orm.ContextBlock( element=orm.ContextElement( - initial_value="Give this group a friendly name. 
An invite code will be generated " - "that other workspace admins can use to join.", + initial_value="_Give this Workspace Group a friendly and descriptive name._", ), ), ] @@ -119,7 +119,7 @@ def handle_create_group( trigger_id=trigger_id, callback_id=actions.CONFIG_CREATE_GROUP_SUBMIT, title_text="Create Group", - submit_button_text="Create", + submit_button_text="Create Group", ) @@ -193,8 +193,8 @@ def handle_create_group_submit( if dm_channel: client.chat_postMessage( channel=dm_channel, - text=f":white_check_mark: *Group Created* — *{group_name}*\n\n" - f"Share this invite code with admins of other workspaces:\n\n`{code}`", + text=f":raised_hands: *New Workspace Group Created!*\n\n*Group Name:* `{group_name}`\n\n*Invite Code:* `{code}`\n\n" + "You can share the Invite Code with an Admin from another Workspace and they can join the Group.", ) except Exception as e: _logger.warning(f"Failed to DM invite code: {e}") @@ -314,7 +314,6 @@ def handle_join_group_submit( _activate_group_membership(client, workspace_record, group) - ws_name = helpers.resolve_workspace_name(workspace_record) _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) other_members = DbManager.find_records( @@ -336,7 +335,7 @@ def handle_join_group_submit( member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( member_client, - f":handshake: *{admin_label}* joined the group *{group.name}*.", + f":punch: *{admin_label}* joined the Workspace Group called *{group.name}*.", ) builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: @@ -387,6 +386,23 @@ def handle_invite_workspace( ) eligible = [ws for ws in all_workspaces if ws.id not in member_ws_ids and ws.bot_token] + if not eligible and not constants.FEDERATION_ENABLED: + msg_blocks = [ + section( + "At least one other Slack Workspace needs to install this SyncBot app, or " + "External Connections need to be allowed, before 
you can invite another Workspace to this Group." + ), + ] + orm.BlockView(blocks=msg_blocks).post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_INVITE_WORKSPACE_SUBMIT, + title_text="Oops!", + submit_button_text="None", + new_or_add="new", + ) + return + modal_blocks: list = [] if eligible: @@ -399,10 +415,10 @@ def handle_invite_workspace( ] modal_blocks.append( orm.InputBlock( - label="Send a direct invite", + label="Send a SyncBot DM", action=actions.CONFIG_INVITE_WORKSPACE_SELECT, element=orm.StaticSelectElement( - placeholder="Select a workspace", + placeholder="Select a Workspace", options=ws_options, ), optional=True, @@ -410,17 +426,16 @@ def handle_invite_workspace( ) modal_blocks.append( block_context( - "A DM will be sent to the workspace's admins " - "with an invitation to join this group.", + "A SyncBot DM will be sent to Admins in the other Workspace.", ) ) + modal_blocks.append(block_context("\u200b")) modal_blocks.append(divider()) modal_blocks.append(section(":memo: *Invite Code*")) modal_blocks.append( block_context( - "Share this code with an admin from another workspace:" - f"\n\n`{group.invite_code}`" + f"Alternatively, share this Invite Code with an Admin from another Workspace:\n\n`{group.invite_code}`" ) ) @@ -429,8 +444,8 @@ def handle_invite_workspace( modal_blocks.append(section(":globe_with_meridians: *External Workspace*")) modal_blocks.append( block_context( - "For workspaces running their own SyncBot instance, " - f"share this code for them to join externally:\n\n`{group.invite_code}`" + "For Workspaces running their own external SyncBot instance, " + f"share this Invite Code for them to join:\n\n`{group.invite_code}`" ) ) @@ -512,16 +527,18 @@ def handle_invite_workspace_submit( builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) return + acting_user_id = helpers.safe_get(body, "user", "id") or user_id member = schemas.WorkspaceGroupMember( group_id=group_id, 
workspace_id=target_ws_id, status="pending", role="member", joined_at=None, + invited_by_slack_user_id=acting_user_id, + invited_by_workspace_id=workspace_record.id, ) DbManager.create_record(member) - acting_user_id = helpers.safe_get(body, "user", "id") or user_id _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) target_client = WebClient(token=helpers.decrypt_bot_token(target_ws.bot_token)) @@ -531,8 +548,7 @@ def handle_invite_workspace_submit( "type": "section", "text": { "type": "mrkdwn", - "text": f":handshake: *{admin_label}* has invited your workspace " - f"to join the group *{group.name}*.", + "text": f":punch: *{admin_label}* has invited your Workspace to join a SyncBot Group!\n\n*Group Name:* `{group.name}`", }, }, { @@ -558,7 +574,7 @@ def handle_invite_workspace_submit( dm_entries = helpers.notify_admins_dm_blocks( target_client, - f"{admin_label} has invited your workspace to join the group {group.name}.", + f"{admin_label} has invited your Workspace to join a SyncBot Group!\n\n*Group Name:* `{group.name}`", invite_blocks, ) helpers.save_dm_messages_to_group_member(member.id, dm_entries) @@ -623,7 +639,7 @@ def handle_accept_group_invite( _update_invite_dms( member, workspace_record, - f":white_check_mark: Your workspace has joined the group *{group.name}*.", + f"Your Workspace has joined the SyncBot Group called *{group.name}*.", ) other_members = DbManager.find_records( @@ -646,7 +662,7 @@ def handle_accept_group_invite( member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) helpers.notify_admins_dm( member_client, - f":handshake: *{ws_name}* has joined the group *{group.name}*.", + f":punch: *{ws_name}* has joined the Workspace Group called *{group.name}*.", ) builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: @@ -670,7 +686,7 @@ def handle_decline_group_invite( logger: Logger, context: dict, ) -> None: - """Decline a pending group invite from 
a DM button.""" + """Handle Decline (invited workspace) or Cancel Invite (inviting workspace) for a pending group invite.""" raw_member_id = helpers.safe_get(body, "actions", 0, "value") try: member_id = int(raw_member_id) @@ -686,12 +702,16 @@ def handle_decline_group_invite( group = DbManager.get_record(schemas.WorkspaceGroup, id=member.group_id) group_name = group.name if group else "the group" + action_id = helpers.safe_get(body, "actions", 0, "action_id") or "" + is_cancel = action_id.startswith(actions.CONFIG_CANCEL_GROUP_REQUEST) + outcome = "canceled" if is_cancel else "declined" + target_ws = helpers.get_workspace_by_id(member.workspace_id) if member.workspace_id else None _update_invite_dms( member, target_ws, - f":x: The invitation to join *{group_name}* was declined.", + f":x: The invitation to join *{group_name}* was {outcome}.", ) group_id = member.group_id @@ -723,6 +743,10 @@ def handle_decline_group_invite( with contextlib.suppress(Exception): builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + if target_ws and target_ws.bot_token and not target_ws.deleted_at: + with contextlib.suppress(Exception): + builders.refresh_home_tab_for_workspace(target_ws, logger, context=None) + def _update_invite_dms( member: schemas.WorkspaceGroupMember, diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 8a5daf0..0a6bb4b 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -6,7 +6,6 @@ from slack_sdk.web import WebClient -import constants import federation import helpers from db import DbManager, schemas @@ -52,15 +51,13 @@ def _parse_event_fields(body: dict, client: WebClient) -> EventContext: def _build_file_context(body: dict, client: WebClient, logger: Logger) -> tuple[list[dict], list[dict], list[dict]]: """Process files attached to a message event. 
- Returns ``(s3_photo_list, photo_blocks, direct_files)`` where: + Returns ``(photo_list, photo_blocks, direct_files)`` where: - * *s3_photo_list* — files uploaded to S3 (have a ``path`` key for - cleanup after syncing). - * *photo_blocks* — Slack Block Kit ``image`` blocks for S3-hosted - images, ready to include in ``chat.postMessage``. - * *direct_files* — files downloaded to ``/tmp`` that should be - uploaded directly to each target channel via - ``files_upload_v2``. + * *photo_list* — always [] (kept for cleanup API; no S3). + * *photo_blocks* — Slack Block Kit ``image`` blocks for inline images + (e.g. GIF picker URLs), ready for ``chat.postMessage``. + * *direct_files* — files downloaded to ``/tmp`` for direct upload to + each target channel via ``files_upload_v2``. """ event = body.get("event", {}) files = (helpers.safe_get(event, "files") or helpers.safe_get(event, "message", "files") or [])[:20] @@ -69,31 +66,13 @@ def _build_file_context(body: dict, client: WebClient, logger: Logger) -> tuple[ images = [f for f in files if f.get("mimetype", "").startswith("image")] videos = [f for f in files if f.get("mimetype", "").startswith("video")] - s3_photo_list: list[dict] = [] photo_blocks: list[dict] = [] direct_files: list[dict] = [] is_edit = event_subtype in ("message_changed", "message_deleted") - if constants.S3_ENABLED: - if is_edit: - photo_names = [ - f"{p['id']}.png" if p.get("filetype") == "heic" else f"{p['id']}.{p.get('filetype', 'png')}" - for p in images - ] - s3_photo_list = [{"url": f"{constants.S3_IMAGE_URL}{name}", "name": name} for name in photo_names] - else: - s3_photo_list = helpers.upload_photos(files=images, client=client, logger=logger) - - photo_blocks = [orm.ImageBlock(image_url=p["url"], alt_text=p["name"]).as_form_field() for p in s3_photo_list] - - if constants.S3_VIDEO_ENABLED and not is_edit: - s3_photo_list.extend(helpers.upload_photos(files=videos, client=client, logger=logger)) - elif not is_edit: - 
direct_files.extend(helpers.download_slack_files(videos, client, logger)) - else: - if not is_edit: - direct_files = helpers.download_slack_files(images + videos, client, logger) + if not is_edit: + direct_files = helpers.download_slack_files(images + videos, client, logger) # Handle GIFs/images from attachments (e.g. GIPHY bot, Slack GIF picker, # unfurled URLs) when no file attachments are present. We always use @@ -128,7 +107,7 @@ def _build_file_context(body: dict, client: WebClient, logger: Logger) -> tuple[ name = att.get("fallback") or "attachment.gif" photo_blocks.append(orm.ImageBlock(image_url=img_url, alt_text=name).as_form_field()) - return s3_photo_list, photo_blocks, direct_files + return [], photo_blocks, direct_files def _get_workspace_name(records: list, channel_id: str, workspace_index: int) -> str | None: @@ -170,7 +149,7 @@ def _handle_new_post( try: client.chat_postMessage( channel=channel_id, - text=":wave: Hello! I'm SyncBot. I was added to this channel, but this channel " + text=":wave: Hello! I'm SyncBot. I was added to this Channel, but this Channel " "doesn't seem to be part of a Sync. I'm leaving now. 
Please open the SyncBot Home " "tab to configure me.", ) diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 94af8d0..1aad2b5 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -119,8 +119,11 @@ def handle_refresh_home( cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) if action == "cooldown" and cached_blocks is not None and remaining is not None: + refresh_idx = helpers.index_of_block_with_action( + cached_blocks, actions.CONFIG_REFRESH_HOME + ) blocks_with_message = helpers.inject_cooldown_message( - cached_blocks, builders._REFRESH_BUTTON_BLOCK_INDEX, remaining + cached_blocks, refresh_idx, remaining ) client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message}) return @@ -229,10 +232,10 @@ def handle_join_sync_submission( first_ws = helpers.get_workspace_by_id(first_channel.workspace_id) channel_ref = helpers.resolve_channel_name(first_channel.channel_id, first_ws) else: - channel_ref = sync_record.title or "the other channel" + channel_ref = sync_record.title or "the other Channel" client.chat_postMessage( channel=channel_id, - text=f":arrows_counterclockwise: *{admin_name}* started syncing this channel with *{channel_ref}*. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_name}* started syncing this Channel with *{channel_ref}*. Messages will be shared automatically.", ) local_ref = helpers.resolve_channel_name(channel_id, workspace_record) @@ -243,7 +246,7 @@ def handle_join_sync_submission( member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) member_client.chat_postMessage( channel=sync_channel.channel_id, - text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this channel. Messages will be shared automatically.", + text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this Channel. 
Messages will be shared automatically.", ) except Exception as exc: _logger.debug(f"join_sync: failed to notify channel {sync_channel.channel_id}: {exc}") @@ -316,7 +319,7 @@ def handle_new_sync_submission( DbManager.create_record(channel_sync_record) client.chat_postMessage( channel=channel_id, - text=f":outbox_tray: *{admin_name}* published this channel for syncing. Other Workspaces can now subscribe.", + text=f":outbox_tray: *{admin_name}* published this Channel for Syncing. Other Workspaces can now subscribe.", ) except Exception as e: logger.error(f"Failed to create sync for channel {channel_id}: {e}") @@ -356,7 +359,7 @@ def handle_member_joined_channel( try: client.chat_postMessage( channel=channel_id, - text=":wave: Hello! I'm SyncBot. I was added to this channel, but this channel " + text=":wave: Hello! I'm SyncBot. I was added to this Channel, but this Channel " "doesn't seem to be part of a Sync. I'm leaving now. Please open the SyncBot Home " "tab to configure me.", ) @@ -386,7 +389,7 @@ def check_join_sync_channel( blocks.append( orm.SectionBlock( action=constants.WARNING_BLOCK, - label=":warning: :warning: This channel is already part of a Sync! Please choose another channel.", + label=":warning: :warning: This Channel is already part of a Sync! Please choose another Channel.", ).as_form_field() ) helpers.update_modal( @@ -418,8 +421,9 @@ def handle_db_reset( logger: Logger, context: dict, ) -> None: - """Open a confirmation modal warning the user before a full DB reset.""" - if not constants.ENABLE_DB_RESET: + """Open a confirmation modal warning the user before a full DB reset. 
Only for the workspace whose team_id matches ENABLE_DB_RESET.""" + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + if not helpers.is_db_reset_visible_for_workspace(team_id): return user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) @@ -434,7 +438,7 @@ def handle_db_reset( trigger_id=trigger_id, view={ "type": "modal", - "title": {"type": "plain_text", "text": "Reset Database?"}, + "title": {"type": "plain_text", "text": "Yikes! Reset Database?"}, "close": {"type": "plain_text", "text": "Cancel"}, "blocks": [ { @@ -442,10 +446,11 @@ def handle_db_reset( "text": { "type": "mrkdwn", "text": ( - ":rotating_light: *This will permanently delete ALL data* :rotating_light:\n\n" - "Every workspace, group, channel sync, user mapping, and federation connection " - "in this database will be erased and the schema will be reinitialized from `init.sql`.\n\n" - "*This action cannot be undone.*" + ":rotating_light: *This Will Permanently Delete ALL Data!* :rotating_light:\n\n" + "Every Slack Install, Workspace Group, Channel Sync, and User Mapping, " + "in this database will be erased and the schema will be reinitialized.\n\n" + "*NOTE:* _All Slack Workspaces will need to reinstall the SyncBot app to get started again._\n\n" + "*This action cannot be undone! MAKE A BACKUP FIRST!*" ), }, }, @@ -454,7 +459,7 @@ def handle_db_reset( "elements": [ { "type": "button", - "text": {"type": "plain_text", "text": "Yes, Reset Everything!"}, + "text": {"type": "plain_text", "text": "Confirm, Erase Everything!"}, "style": "danger", "action_id": actions.CONFIG_DB_RESET_PROCEED, }, @@ -471,14 +476,40 @@ def handle_db_reset_proceed( logger: Logger, context: dict, ) -> None: - """Execute the database reset after user confirmed via modal.""" - if not constants.ENABLE_DB_RESET: + """Execute the database reset after user confirmed via modal. 
Only for the workspace whose team_id matches ENABLE_DB_RESET.""" + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + if not helpers.is_db_reset_visible_for_workspace(team_id): return user_id = helpers.get_user_id_from_body(body) if not user_id or not helpers.is_user_authorized(client, user_id): return + # Update the modal to a "done" state so the user can close it (Slack only allows + # closing modals via view_submission, not block_actions, so we replace the view). + view_id = helpers.safe_get(body, "view", "id") + if view_id: + try: + client.views_update( + view_id=view_id, + view={ + "type": "modal", + "title": {"type": "plain_text", "text": "Reset Complete"}, + "close": {"type": "plain_text", "text": "Close"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":skull_and_crossbones: You can close this now.", + }, + }, + ], + }, + ) + except Exception as e: + _logger.warning("Failed to update modal after DB reset: %s", e) + _logger.critical( "DB_RESET triggered by user %s — dropping database and reinitializing from init.sql", user_id, @@ -490,11 +521,6 @@ def handle_db_reset_proceed( helpers.clear_all_caches() - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "team_id") - ) if team_id and user_id: try: client.views_publish( @@ -506,7 +532,7 @@ def handle_db_reset_proceed( "type": "section", "text": { "type": "mrkdwn", - "text": ":white_check_mark: *Database has been reset.*\nPlease reinstall the app or re-open this tab to get started fresh.", + "text": "*Database Has Been Reset!*\nPlease reinstall SyncBot in your Workspace.", }, } ], diff --git a/syncbot/handlers/tokens.py b/syncbot/handlers/tokens.py index 397b67d..2b29a2b 100644 --- a/syncbot/handlers/tokens.py +++ b/syncbot/handlers/tokens.py @@ -100,7 +100,7 @@ def handle_tokens_revoked( member_client, f":double_vertical_bar: *{ws_name}* has uninstalled 
SyncBot. " f"Syncing has been paused. If they reinstall within {retention_days} days, " - "syncing will resume automatically.", + "Syncing will resume automatically.", ) member_channel_ids = [] diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py index 51c56d0..c8847f0 100644 --- a/syncbot/handlers/users.py +++ b/syncbot/handlers/users.py @@ -1,5 +1,6 @@ """User event handlers — team join, profile changes, user mapping management.""" +import contextlib import logging import time from datetime import UTC, datetime @@ -194,10 +195,8 @@ def handle_user_mapping_refresh( helpers.run_auto_match_for_workspace(client, workspace_record.id) for member_client, member_ws_id in member_clients: - try: + with contextlib.suppress(Exception): helpers.run_auto_match_for_workspace(member_client, member_ws_id) - except Exception: - pass block_dicts = builders.build_user_mapping_screen( client, @@ -288,4 +287,4 @@ def handle_user_mapping_edit_submit( group_id=group_id or None, context=context, ) - builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) \ No newline at end of file + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py index 4d3d9cb..f4e784e 100644 --- a/syncbot/helpers/__init__.py +++ b/syncbot/helpers/__init__.py @@ -18,6 +18,7 @@ format_admin_label, get_request_type, get_user_id_from_body, + is_db_reset_visible_for_workspace, is_user_authorized, safe_get, ) @@ -27,7 +28,6 @@ download_public_file, download_slack_files, upload_files_to_slack, - upload_photos, ) from helpers.notifications import ( get_admin_ids, @@ -40,6 +40,7 @@ from helpers.oauth import get_oauth_flow from helpers.refresh import ( cooldown_message_block, + index_of_block_with_action, inject_cooldown_message, refresh_after_full, refresh_cooldown_check, @@ -125,7 +126,9 @@ "get_user_info", "get_workspace_by_id", "get_workspace_record", + 
"index_of_block_with_action", "inject_cooldown_message", + "is_db_reset_visible_for_workspace", "is_user_authorized", "notify_admins_dm", "notify_admins_dm_blocks", @@ -146,5 +149,4 @@ "slack_retry", "update_modal", "upload_files_to_slack", - "upload_photos", ] diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index f99e548..78e88ab 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -55,16 +55,37 @@ def is_user_authorized(client, user_id: str) -> bool: return bool(user.get("is_admin") or user.get("is_owner")) +def is_db_reset_visible_for_workspace(team_id: str | None) -> bool: + """Return True if the DB reset button/action is allowed for this workspace. + + When ENABLE_DB_RESET is set to a Slack team ID, only that workspace may see + and use the Reset Database button; other workspaces cannot. + Reads ENABLE_DB_RESET from os.environ at call time so it is correct even + if .env was loaded after constants was first imported. + """ + enabled = (os.environ.get("ENABLE_DB_RESET") or "").strip() + if not enabled: + _logger.debug("DB reset button hidden: ENABLE_DB_RESET not set") + return False + visible = (team_id or "") == enabled + if not visible: + _logger.debug( + "DB reset button hidden: team_id %r does not match ENABLE_DB_RESET", + team_id, + ) + return visible + + def format_admin_label(client, user_id: str, workspace) -> tuple[str, str]: """Return ``(display_name, full_label)`` for an admin.""" from .slack_api import get_user_info from .workspace import resolve_workspace_name display_name, _ = get_user_info(client, user_id) - display_name = display_name or "An admin" + display_name = display_name or "An Admin from another Workspace" ws_name = resolve_workspace_name(workspace) if workspace else None if ws_name: - return display_name, f"{display_name} ({ws_name})" + return display_name, f"{display_name} from {ws_name}" return display_name, display_name diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py 
index 9d1f611..6d65599 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -14,12 +14,54 @@ from typing import Any import constants -from db import DbManager, schemas +from sqlalchemy import text + +from db import DbManager, get_engine, schemas _logger = logging.getLogger(__name__) BACKUP_VERSION = 1 MIGRATION_VERSION = 1 +_RAW_BACKUP_TABLES = ("slack_bots", "slack_installations", "slack_oauth_states") + + +def _dump_raw_table(table_name: str) -> list[dict]: + """Return all rows from a non-ORM table as dictionaries.""" + engine = get_engine() + with engine.connect() as conn: + rows = conn.execute(text(f"SELECT * FROM `{table_name}`")).mappings().all() + return [dict(row) for row in rows] + + +def _restore_raw_table(table_name: str, rows: list[dict]) -> None: + """Replace table contents for a non-ORM table from backup rows.""" + engine = get_engine() + with engine.begin() as conn: + conn.execute(text(f"DELETE FROM `{table_name}`")) + for row in rows: + if not row: + continue + parsed: dict[str, Any] = {} + for key, value in row.items(): + if isinstance(value, str) and key in { + "bot_token_expires_at", + "user_token_expires_at", + "installed_at", + "expire_at", + }: + try: + parsed[key] = datetime.fromisoformat(value.replace("Z", "+00:00")) + except ValueError: + parsed[key] = value + else: + parsed[key] = value + + cols = ", ".join(f"`{k}`" for k in parsed.keys()) + placeholders = ", ".join(f":{k}" for k in parsed.keys()) + conn.execute( + text(f"INSERT INTO `{table_name}` ({cols}) VALUES ({placeholders})"), + parsed, + ) def _json_serializer(obj: Any) -> Any: @@ -100,6 +142,8 @@ def build_full_backup() -> dict: for table_name, cls in tables: records = DbManager.find_records(cls, []) payload[table_name] = _records_to_list(records, cls) + for table_name in _RAW_BACKUP_TABLES: + payload[table_name] = _dump_raw_table(table_name) payload["hmac"] = _compute_backup_hmac({k: v for k, v in payload.items() if k != "hmac"}) return payload 
@@ -139,6 +183,9 @@ def restore_full_backup( """ team_ids: list[str] = [] tables = [ + "slack_bots", + "slack_installations", + "slack_oauth_states", "workspaces", "workspace_groups", "workspace_group_members", @@ -165,6 +212,9 @@ def restore_full_backup( datetime_keys = {"created_at", "updated_at", "deleted_at", "joined_at", "matched_at"} for table_name in tables: rows = data.get(table_name, []) + if table_name in _RAW_BACKUP_TABLES: + _restore_raw_table(table_name, rows) + continue cls = table_to_schema[table_name] for row in rows: kwargs = {} diff --git a/syncbot/helpers/files.py b/syncbot/helpers/files.py index 543b520..87c8931 100644 --- a/syncbot/helpers/files.py +++ b/syncbot/helpers/files.py @@ -11,8 +11,6 @@ import requests from slack_sdk import WebClient -import constants - _logger = logging.getLogger(__name__) _DOWNLOAD_TIMEOUT = 30 # seconds @@ -62,79 +60,6 @@ def _download_to_file(url: str, file_path: str, headers: dict | None = None) -> raise -def _get_s3_client(): - """Return a reusable boto3 S3 client (created once per call-site).""" - import boto3 - - if constants.LOCAL_DEVELOPMENT: - return boto3.client( - "s3", - aws_access_key_id=os.environ[constants.AWS_ACCESS_KEY_ID], - aws_secret_access_key=os.environ[constants.AWS_SECRET_ACCESS_KEY], - ) - return boto3.client("s3") - - -def upload_photos(files: list[dict], client: WebClient, logger: Logger) -> list[dict]: - """Download file attachments from Slack and upload them to S3. - - Images are optionally converted from HEIC to PNG. 
- """ - uploaded: list[dict] = [] - s3_client = _get_s3_client() - auth_headers = {"Authorization": f"Bearer {client.token}"} - - for f in files: - try: - is_image = f.get("mimetype", "").startswith("image") - - if is_image: - download_url = ( - f.get("thumb_480") or f.get("thumb_360") - or f.get("thumb_80") or f.get("url_private") - ) - else: - download_url = f.get("url_private") - if not download_url: - continue - - safe_id, safe_ext, file_name = _safe_file_parts(f) - file_path = f"/tmp/{file_name}" - file_mimetype = f.get("mimetype", "application/octet-stream") - - _download_to_file(download_url, file_path, headers=auth_headers) - - if is_image and f.get("filetype") == "heic": - from PIL import Image - from pillow_heif import register_heif_opener - - register_heif_opener() - heic_img = Image.open(file_path) - x, y = heic_img.size - coeff = min(constants.MAX_HEIF_SIZE / max(x, y), 1) - heic_img = heic_img.resize((int(x * coeff), int(y * coeff))) - heic_img.save(file_path.replace(".heic", ".png"), quality=95, optimize=True, format="PNG") - os.remove(file_path) - file_path = file_path.replace(".heic", ".png") - file_name = file_name.replace(".heic", ".png") - file_mimetype = "image/png" - - with open(file_path, "rb") as fh: - s3_client.upload_fileobj( - fh, constants.S3_IMAGE_BUCKET, file_name, ExtraArgs={"ContentType": file_mimetype} - ) - uploaded.append( - { - "url": f"{constants.S3_IMAGE_URL}{file_name}", - "name": file_name, - "path": file_path, - } - ) - except Exception as e: - logger.error(f"Error uploading file: {e}") - return uploaded - - def download_public_file(url: str, logger: Logger) -> dict | None: """Download a file from a public URL (e.g. 
GIPHY) to /tmp.""" try: diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py index cbdbde4..b0270d2 100644 --- a/syncbot/helpers/oauth.py +++ b/syncbot/helpers/oauth.py @@ -3,60 +3,51 @@ import logging import os -from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow from slack_bolt.oauth import OAuthFlow from slack_bolt.oauth.oauth_settings import OAuthSettings -from slack_sdk.oauth.installation_store import FileInstallationStore -from slack_sdk.oauth.state_store import FileOAuthStateStore +from slack_sdk.oauth.installation_store.sqlalchemy import SQLAlchemyInstallationStore +from slack_sdk.oauth.state_store.sqlalchemy import SQLAlchemyOAuthStateStore import constants _logger = logging.getLogger(__name__) +_OAUTH_STATE_EXPIRATION_SECONDS = 600 + def get_oauth_flow(): - """Build the Slack OAuth flow, choosing the right backend. + """Build the Slack OAuth flow using MySQL-backed stores. - - **Production (Lambda)**: Uses S3-backed stores. - - **Local development with OAuth credentials**: Uses file-based stores. - - **Local development without OAuth credentials**: Returns *None*. + Uses the same RDS/MySQL connection as the rest of the app. Works for both + local development and production (Lambda). If OAuth credentials are not + set and LOCAL_DEVELOPMENT is true, returns None (single-workspace mode). 
""" client_id = os.environ.get(constants.SLACK_CLIENT_ID, "").strip() client_secret = os.environ.get(constants.SLACK_CLIENT_SECRET, "").strip() scopes_raw = os.environ.get(constants.SLACK_SCOPES, "").strip() - if constants.LOCAL_DEVELOPMENT: - if not (client_id and client_secret and scopes_raw): - _logger.info("OAuth credentials not set — running in single-workspace mode") - return None - - _logger.info("OAuth credentials found — enabling local OAuth flow (file-based stores)") - base_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), ".oauth-data") - os.makedirs(base_dir, exist_ok=True) - - return OAuthFlow( - settings=OAuthSettings( - client_id=client_id, - client_secret=client_secret, - scopes=scopes_raw.split(","), - installation_store=FileInstallationStore( - base_dir=os.path.join(base_dir, "installations"), - client_id=client_id, - ), - state_store=FileOAuthStateStore( - expiration_seconds=600, - base_dir=os.path.join(base_dir, "states"), - client_id=client_id, - ), - ), - ) - else: - return LambdaS3OAuthFlow( - oauth_state_bucket_name=os.environ[constants.SLACK_STATE_S3_BUCKET_NAME], - installation_bucket_name=os.environ[constants.SLACK_INSTALLATION_S3_BUCKET_NAME], - settings=OAuthSettings( - client_id=client_id, - client_secret=client_secret, - scopes=scopes_raw.split(","), - ), - ) + if constants.LOCAL_DEVELOPMENT and not (client_id and client_secret and scopes_raw): + _logger.info("OAuth credentials not set — running in single-workspace mode") + return None + + from db import get_engine + + engine = get_engine() + installation_store = SQLAlchemyInstallationStore( + client_id=client_id, + engine=engine, + ) + state_store = SQLAlchemyOAuthStateStore( + expiration_seconds=_OAUTH_STATE_EXPIRATION_SECONDS, + engine=engine, + ) + + return OAuthFlow( + settings=OAuthSettings( + client_id=client_id, + client_secret=client_secret, + scopes=scopes_raw.split(","), + installation_store=installation_store, + state_store=state_store, + ), + ) diff --git 
a/syncbot/helpers/refresh.py b/syncbot/helpers/refresh.py index 9af7c8b..f500c69 100644 --- a/syncbot/helpers/refresh.py +++ b/syncbot/helpers/refresh.py @@ -25,6 +25,20 @@ def cooldown_message_block(remaining_seconds: int) -> dict: } +def index_of_block_with_action(block_dicts: list, action_id: str) -> int: + """Return the index of the first block that contains an element with the given action_id. + + Used to find the Refresh button block so the cooldown message can be inserted after it. + Returns len(block_dicts) - 1 if not found (inject at end). + """ + for i, block in enumerate(block_dicts): + if block.get("type") == "actions": + for elt in block.get("elements") or []: + if elt.get("action_id") == action_id: + return i + return max(0, len(block_dicts) - 1) + + def inject_cooldown_message( cached_blocks: list, after_block_index: int, diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py index 9dd9457..6d45b84 100644 --- a/syncbot/slack/actions.py +++ b/syncbot/slack/actions.py @@ -102,7 +102,7 @@ # --------------------------------------------------------------------------- CONFIG_PUBLISH_CHANNEL = "publish_channel" -"""Action: user clicked "Publish Channel" button (value carries group_id).""" +"""Action: user clicked "Sync Channel" button (value carries group_id).""" CONFIG_PUBLISH_CHANNEL_SELECT = "publish_channel_select" """Input: channel picker in the publish channel modal.""" diff --git a/syncbot/slack/forms.py b/syncbot/slack/forms.py index 3f6d73b..67f20c0 100644 --- a/syncbot/slack/forms.py +++ b/syncbot/slack/forms.py @@ -18,13 +18,13 @@ orm.InputBlock( label="Channel to Sync", action=actions.CONFIG_NEW_SYNC_CHANNEL_SELECT, - element=orm.ConversationsSelectElement(placeholder="Select a channel"), + element=orm.ConversationsSelectElement(placeholder="Select a Channel"), optional=False, ), orm.ContextBlock( element=orm.ContextElement( - initial_value="Select the channel you want to sync. The sync will be named after the channel. 
" - "If a sync has already been set up in another workspace, use 'Join existing Sync' instead.", + initial_value="Select the Channel you want to sync. The Sync will be named after the Channel. " + "If a Sync has already been set up in another Workspace, use 'Join existing Sync' instead.", ), ), ] @@ -41,7 +41,7 @@ orm.InputBlock( label="Sync Channel Select", action=actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT, - element=orm.ConversationsSelectElement(placeholder="Select a channel to use for this Sync"), + element=orm.ConversationsSelectElement(placeholder="Select a Channel to use for this Sync"), optional=False, dispatch_action=True, ), @@ -59,7 +59,7 @@ ), orm.ContextBlock( element=orm.ContextElement( - initial_value="Enter the invite code shared by an admin from another workspace in the group.", + initial_value="Enter the invite code shared by an Admin from another Workspace in the Group.", ), ), ] @@ -71,12 +71,12 @@ orm.InputBlock( label="Channel to Publish", action=actions.CONFIG_PUBLISH_CHANNEL_SELECT, - element=orm.ConversationsSelectElement(placeholder="Select a channel to publish"), + element=orm.ConversationsSelectElement(placeholder="Select a Channel to publish"), optional=False, ), orm.ContextBlock( element=orm.ContextElement( - initial_value="Select a channel from your workspace to make available for syncing.", + initial_value="Select a Channel from your Workspace to make available for Syncing.", ), ), ] @@ -88,12 +88,12 @@ orm.InputBlock( label="Channel for Sync", action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT, - element=orm.ConversationsSelectElement(placeholder="Select a channel to sync into"), + element=orm.ConversationsSelectElement(placeholder="Select a Channel to sync into"), optional=False, ), orm.ContextBlock( element=orm.ContextElement( - initial_value="Select a channel in your workspace to receive messages from the published channel.", + initial_value="Select a Channel in your Workspace to receive messages from the published Channel.", ), ), ] 
diff --git a/template.yaml b/template.yaml index ef76c2b..fe61152 100644 --- a/template.yaml +++ b/template.yaml @@ -2,9 +2,9 @@ AWSTemplateFormatVersion: "2010-09-09" Transform: AWS::Serverless-2016-10-31 Description: > SyncBot - Slack app that syncs posts and replies across workspaces. - Free-tier compatible: Lambda, API Gateway, RDS db.t3.micro, S3. - All infrastructure resources (RDS, VPC, S3 buckets) can optionally - point at existing instances so multiple apps can share one account. + Free-tier compatible: Lambda, API Gateway, RDS db.t3.micro. + OAuth and app data use RDS (MySQL); media is uploaded directly to Slack. + SAM deploy uses an S3 artifact bucket for packaging only (not runtime). Globals: Function: @@ -101,30 +101,6 @@ Parameters: Type: String Default: "10.0.0.0/16" - # --- S3 Buckets --- - - ExistingSlackStateBucket: - Description: > - Name of an existing S3 bucket for Slack OAuth state. - Leave EMPTY to create a new bucket. - Type: String - Default: "" - - ExistingInstallationBucket: - Description: > - Name of an existing S3 bucket for Slack installation data. - Leave EMPTY to create a new bucket. - Type: String - Default: "" - - ExistingImagesBucket: - Description: > - Name of an existing S3 bucket for synced images. - Leave EMPTY to create a new bucket. The bucket must allow - public reads if you want images to render in Slack. 
- Type: String - Default: "" - # --- Security --- PasswordEncryptKey: @@ -149,9 +125,6 @@ Parameters: Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] - CreateSlackStateBucket: !Equals [!Ref ExistingSlackStateBucket, ""] - CreateInstallationBucket: !Equals [!Ref ExistingInstallationBucket, ""] - CreateImagesBucket: !Equals [!Ref ExistingImagesBucket, ""] Mappings: StagesMap: @@ -336,83 +309,6 @@ Resources: - Key: Name Value: !Sub "syncbot-${Stage}-db" - # ============================================================ - # S3 Buckets (free-tier: 5 GB total, 20 000 GET, 2 000 PUT) - # - # Each bucket is skipped when an existing bucket name is provided. - # ============================================================ - - SlackStateBucket: - Type: AWS::S3::Bucket - Condition: CreateSlackStateBucket - Properties: - BucketName: !Sub "syncbot-${Stage}-slack-state-${AWS::AccountId}" - BucketEncryption: - ServerSideEncryptionConfiguration: - - ServerSideEncryptionByDefault: - SSEAlgorithm: AES256 - PublicAccessBlockConfiguration: - BlockPublicAcls: true - BlockPublicPolicy: true - IgnorePublicAcls: true - RestrictPublicBuckets: true - LifecycleConfiguration: - Rules: - - Id: ExpireOAuthState - Status: Enabled - ExpirationInDays: 1 - - SlackInstallationBucket: - Type: AWS::S3::Bucket - Condition: CreateInstallationBucket - Properties: - BucketName: !Sub "syncbot-${Stage}-slack-installations-${AWS::AccountId}" - BucketEncryption: - ServerSideEncryptionConfiguration: - - ServerSideEncryptionByDefault: - SSEAlgorithm: AES256 - PublicAccessBlockConfiguration: - BlockPublicAcls: true - BlockPublicPolicy: true - IgnorePublicAcls: true - RestrictPublicBuckets: true - VersioningConfiguration: - Status: Enabled - - SyncBotImagesBucket: - Type: AWS::S3::Bucket - Condition: CreateImagesBucket - Properties: - BucketName: !Sub "syncbot-${Stage}-images-${AWS::AccountId}" - BucketEncryption: - ServerSideEncryptionConfiguration: - - ServerSideEncryptionByDefault: - 
SSEAlgorithm: AES256 - PublicAccessBlockConfiguration: - BlockPublicAcls: false - BlockPublicPolicy: false - IgnorePublicAcls: false - RestrictPublicBuckets: false - LifecycleConfiguration: - Rules: - - Id: ExpireOldImages - Status: Enabled - ExpirationInDays: 90 - - SyncBotImagesBucketPolicy: - Type: AWS::S3::BucketPolicy - Condition: CreateImagesBucket - Properties: - Bucket: !Ref SyncBotImagesBucket - PolicyDocument: - Version: "2012-10-17" - Statement: - - Sid: PublicReadGetObject - Effect: Allow - Principal: "*" - Action: "s3:GetObject" - Resource: !Sub "${SyncBotImagesBucket.Arn}/*" - # ============================================================ # Lambda Function (free-tier: 1M requests, 400 000 GB-s) # @@ -434,22 +330,6 @@ Resources: - x86_64 Timeout: 30 MemorySize: 128 - Policies: - - S3CrudPolicy: - BucketName: !If - - CreateSlackStateBucket - - !Ref SlackStateBucket - - !Ref ExistingSlackStateBucket - - S3CrudPolicy: - BucketName: !If - - CreateInstallationBucket - - !Ref SlackInstallationBucket - - !Ref ExistingInstallationBucket - - S3CrudPolicy: - BucketName: !If - - CreateImagesBucket - - !Ref SyncBotImagesBucket - - !Ref ExistingImagesBucket Events: SyncBot: Type: Api @@ -484,14 +364,6 @@ Resources: - StagesMap - !Ref Stage - SlackClientID - ENV_SLACK_STATE_S3_BUCKET_NAME: !If - - CreateSlackStateBucket - - !Ref SlackStateBucket - - !Ref ExistingSlackStateBucket - ENV_SLACK_INSTALLATION_S3_BUCKET_NAME: !If - - CreateInstallationBucket - - !Ref SlackInstallationBucket - - !Ref ExistingInstallationBucket DATABASE_HOST: !If - CreateDatabase - !GetAtt RDSInstance.Endpoint.Address @@ -501,14 +373,6 @@ Resources: ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema PASSWORD_ENCRYPT_KEY: !Ref PasswordEncryptKey REQUIRE_ADMIN: !Ref RequireAdmin - S3_IMAGE_BUCKET: !If - - CreateImagesBucket - - !Ref SyncBotImagesBucket - - !Ref ExistingImagesBucket - S3_IMAGE_URL: !If - - CreateImagesBucket - - !Sub "https://${SyncBotImagesBucket}.s3.amazonaws.com/" - - !Sub 
"https://${ExistingImagesBucket}.s3.amazonaws.com/" # ============================================================ # CloudWatch Alarms (free-tier: 10 alarms) @@ -632,17 +496,3 @@ Outputs: Condition: CreateDatabase Description: VPC ID (only when VPC is created by this stack) Value: !Ref VPC - - ImagesBucketName: - Description: S3 bucket name for synced images - Value: !If - - CreateImagesBucket - - !Ref SyncBotImagesBucket - - !Ref ExistingImagesBucket - - ImagesBucketUrl: - Description: Public URL for synced images - Value: !If - - CreateImagesBucket - - !Sub "https://${SyncBotImagesBucket}.s3.amazonaws.com/" - - !Sub "https://${ExistingImagesBucket}.s3.amazonaws.com/" diff --git a/tests/test_oauth.py b/tests/test_oauth.py new file mode 100644 index 0000000..c322fd5 --- /dev/null +++ b/tests/test_oauth.py @@ -0,0 +1,77 @@ +"""Unit tests for OAuth flow construction.""" + +import os +from unittest.mock import patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from helpers.oauth import get_oauth_flow + + +class TestGetOAuthFlow: + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", True) + @patch.dict(os.environ, {}, clear=True) + def test_local_dev_without_oauth_credentials_returns_none(self): + assert get_oauth_flow() is None + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", True) + @patch.dict( + os.environ, + { + "ENV_SLACK_CLIENT_ID": "cid", + "ENV_SLACK_CLIENT_SECRET": "csecret", + "ENV_SLACK_SCOPES": "chat:write,channels:read", + }, + clear=True, + ) + @patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_local_dev_with_credentials_uses_sql_stores( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + 
engine = object() + mock_get_engine.return_value = engine + + flow = get_oauth_flow() + + assert flow is not None + mock_get_engine.assert_called_once_with() + mock_installation_store_cls.assert_called_once_with(client_id="cid", engine=engine) + mock_state_store_cls.assert_called_once_with(expiration_seconds=600, engine=engine) + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", False) + @patch.dict( + os.environ, + { + "ENV_SLACK_CLIENT_ID": "prod-cid", + "ENV_SLACK_CLIENT_SECRET": "prod-secret", + "ENV_SLACK_SCOPES": "chat:write,groups:read", + }, + clear=True, + ) + @patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_production_uses_sql_stores_without_s3( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + engine = object() + mock_get_engine.return_value = engine + + flow = get_oauth_flow() + + assert flow is not None + mock_get_engine.assert_called_once_with() + mock_installation_store_cls.assert_called_once_with(client_id="prod-cid", engine=engine) + mock_state_store_cls.assert_called_once_with(expiration_seconds=600, engine=engine) From 103b7d8b5c6daa7fd9a1e4b4b68cb54738a94cb3 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 13 Mar 2026 17:24:14 -0500 Subject: [PATCH 06/45] Small fix to groups. 
--- syncbot/handlers/groups.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index 4a1cb58..5ee4984 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -374,9 +374,18 @@ def handle_invite_workspace( if not group: return + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + current_workspace_id = workspace_record.id if workspace_record else None + + # Only active members count as "already in the group"; pending invites can be re-invited current_members = DbManager.find_records( schemas.WorkspaceGroupMember, - [schemas.WorkspaceGroupMember.group_id == group_id], + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], ) member_ws_ids = {m.workspace_id for m in current_members if m.workspace_id} @@ -386,7 +395,9 @@ def handle_invite_workspace( ) eligible = [ws for ws in all_workspaces if ws.id not in member_ws_ids and ws.bot_token] - if not eligible and not constants.FEDERATION_ENABLED: + # Show Oops only when there are no other installed workspaces at all (not when everyone is already in the group) + other_installed = [ws for ws in all_workspaces if ws.bot_token and ws.id != current_workspace_id] + if not other_installed and not constants.FEDERATION_ENABLED: msg_blocks = [ section( "At least one other Slack Workspace needs to install this SyncBot app, or " From 9df7a80114764f0d91faf2093250d899011a3d66 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 13 Mar 2026 21:57:36 -0500 Subject: [PATCH 07/45] Code cleanup on aisle three. Added reusable helper and handler functions. Fixed typos and cleaned up vars/consts. Added more logging and comments. Updated tests. Moved docs to docs. 
--- README.md | 4 +- ARCHITECTURE.md => docs/ARCHITECTURE.md | 0 docs/BACKUP_AND_MIGRATION.md | 2 +- IMPROVEMENTS.md => docs/IMPROVEMENTS.md | 2 +- syncbot/builders/channel_sync.py | 2 + syncbot/builders/home.py | 9 +- syncbot/builders/sync.py | 2 +- syncbot/constants.py | 18 ++- syncbot/handlers/_common.py | 52 +++++++- syncbot/handlers/channel_sync.py | 158 ++++++++---------------- syncbot/handlers/export_import.py | 10 +- syncbot/handlers/groups.py | 135 +++++++++----------- syncbot/handlers/messages.py | 23 ++-- syncbot/handlers/sync.py | 4 +- syncbot/handlers/tokens.py | 12 +- syncbot/handlers/users.py | 69 ++++++----- syncbot/helpers/core.py | 2 +- syncbot/helpers/export_import.py | 6 +- syncbot/helpers/user_matching.py | 9 +- syncbot/helpers/workspace.py | 9 +- syncbot/routing.py | 2 +- syncbot/slack/actions.py | 10 +- syncbot/slack/orm.py | 15 ++- tests/test_channel_sync_handlers.py | 110 +++++++++++++++++ tests/test_db.py | 2 +- tests/test_export_import_handlers.py | 50 ++++++++ tests/test_groups_handlers.py | 48 +++++++ tests/test_handlers.py | 2 +- tests/test_helpers.py | 2 +- 29 files changed, 487 insertions(+), 282 deletions(-) rename ARCHITECTURE.md => docs/ARCHITECTURE.md (100%) rename IMPROVEMENTS.md => docs/IMPROVEMENTS.md (99%) create mode 100644 tests/test_channel_sync_handlers.py create mode 100644 tests/test_export_import_handlers.py create mode 100644 tests/test_groups_handlers.py diff --git a/README.md b/README.md index dfbd88a..a484be9 100644 --- a/README.md +++ b/README.md @@ -256,11 +256,11 @@ OAuth state and installation data are stored in the same RDS MySQL database. 
| Document | Description | |----------|-------------| | [User Guide](docs/USER_GUIDE.md) | End-user walkthrough of all features | -| [Architecture](ARCHITECTURE.md) | Message sync flow, AWS infrastructure, caching | +| [Architecture](docs/ARCHITECTURE.md) | Message sync flow, AWS infrastructure, caching | | [Backup & Migration](docs/BACKUP_AND_MIGRATION.md) | Full-instance backup/restore, workspace data migration | | [Deployment](docs/DEPLOYMENT.md) | Shared infrastructure, CI/CD via GitHub Actions | | [API Reference](docs/API_REFERENCE.md) | HTTP endpoints and subscribed Slack events | -| [Improvements](IMPROVEMENTS.md) | Completed and planned improvements | +| [Improvements](docs/IMPROVEMENTS.md) | Completed and planned improvements | ## Project Structure diff --git a/ARCHITECTURE.md b/docs/ARCHITECTURE.md similarity index 100% rename from ARCHITECTURE.md rename to docs/ARCHITECTURE.md diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md index e5235f3..625725b 100644 --- a/docs/BACKUP_AND_MIGRATION.md +++ b/docs/BACKUP_AND_MIGRATION.md @@ -20,4 +20,4 @@ After import, Home tab and sync-list caches for that workspace are cleared. ### Instance A Behavior -When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](../ARCHITECTURE.md) for details. +When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](ARCHITECTURE.md) for details. 
diff --git a/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md similarity index 99% rename from IMPROVEMENTS.md rename to docs/IMPROVEMENTS.md index e47a6f4..b42ff1f 100644 --- a/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -484,4 +484,4 @@ This document outlines the improvements made to the SyncBot application and addi - Duplicated code has been consolidated into shared helpers throughout handlers and federation modules - Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching keeps builds lightweight, and cross-workspace refreshes use `context=None` to prevent cache contamination - Variable naming follows a consistent domain-model convention: `member_ws`/`member_client` for group members, `sync_channel` for ORM records, `slack_channel` for raw API dicts -- Pre-release schema management uses `db/init.sql` as the single source of truth (no separate migration scripts) \ No newline at end of file +- Pre-release schema management uses `db/init.sql` as the single source of truth (no separate migration scripts) diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py index 303f44c..2a6db4b 100644 --- a/syncbot/builders/channel_sync.py +++ b/syncbot/builders/channel_sync.py @@ -11,6 +11,8 @@ from slack import actions, orm from slack.blocks import ( context as block_context, +) +from slack.blocks import ( section, ) diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py index 6f2e85b..c6cf3fe 100644 --- a/syncbot/builders/home.py +++ b/syncbot/builders/home.py @@ -299,8 +299,13 @@ def _build_pending_invite_section( admin_name, _ = helpers.get_user_info(ws_client, invite.invited_by_slack_user_id) if admin_name: inviter_label = f"{admin_name} from {workspace_label}" - except Exception: - pass + except Exception as exc: + # Keep the workspace-level fallback label if we cannot resolve the + # inviter's display name from Slack. 
+ _logger.debug( + "pending_invite_inviter_name_lookup_failed", + extra={"invite_id": invite.id, "workspace_id": invite.invited_by_workspace_id, "error": str(exc)}, + ) blocks.append(divider()) blocks.append(header(f"{group.name}")) diff --git a/syncbot/builders/sync.py b/syncbot/builders/sync.py index 1450ff3..8cc7616 100644 --- a/syncbot/builders/sync.py +++ b/syncbot/builders/sync.py @@ -68,7 +68,7 @@ def build_join_sync_form( join_sync_form.post_modal( client=client, trigger_id=trigger_id, - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, title_text="Join Sync", new_or_add="new", ) diff --git a/syncbot/constants.py b/syncbot/constants.py index eb33b8b..e71a198 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -1,9 +1,11 @@ """Application constants and startup configuration validation. -This module defines the **names** of all environment variables the app -reads at runtime (the actual *values* come from ``os.environ``). It -also provides :func:`validate_config` which should be called once at -startup to fail fast on missing configuration. +This module defines: +1) environment-variable *name* constants, and +2) derived runtime flags computed from ``os.environ``. + +It also provides :func:`validate_config` to fail fast on missing +configuration at startup. """ import logging @@ -31,8 +33,12 @@ ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" -# Make the "Reset Database" button appear on the Home tab in a specific Workspace. -ENABLE_DB_RESET = os.environ.get("ENABLE_DB_RESET", "").strip() +# Name of env var that scopes the Reset Database button to one workspace. 
+ENABLE_DB_RESET = "ENABLE_DB_RESET" + +# --------------------------------------------------------------------------- +# Derived runtime flags / computed values +# --------------------------------------------------------------------------- LOCAL_DEVELOPMENT = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" diff --git a/syncbot/handlers/_common.py b/syncbot/handlers/_common.py index a40f423..bd43ce5 100644 --- a/syncbot/handlers/_common.py +++ b/syncbot/handlers/_common.py @@ -39,6 +39,16 @@ def _parse_private_metadata(body: dict) -> dict: return {} +def _extract_team_id(body: dict) -> str | None: + """Return a workspace/team ID from common Slack payload locations.""" + return ( + helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "team_id") + or helpers.safe_get(body, "user", "team_id") + ) + + def _get_authorized_workspace( body: dict, client, context: dict, action_name: str ) -> tuple[str, schemas.Workspace] | None: @@ -52,11 +62,10 @@ def _get_authorized_workspace( _logger.warning("authorization_denied", extra={"user_id": user_id, "action": action_name}) return None - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "team_id") - ) + team_id = _extract_team_id(body) + if not team_id: + _logger.warning("workspace_resolution_failed", extra={"user_id": user_id, "action": action_name}) + return None workspace_record = helpers.get_workspace_record(team_id, body, context, client) if not workspace_record: return None @@ -64,6 +73,39 @@ def _get_authorized_workspace( return user_id, workspace_record +def _iter_view_state_actions(body: dict): + """Yield ``(action_id, action_data)`` pairs from ``view.state.values``.""" + state_values = helpers.safe_get(body, "view", "state", "values") or {} + for block_data in state_values.values(): + yield from block_data.items() + + +def _get_selected_option_value(body: dict, action_id: 
str) -> str | None: + """Return ``selected_option.value`` for a view state action.""" + for aid, action_data in _iter_view_state_actions(body): + if aid == action_id: + return helpers.safe_get(action_data, "selected_option", "value") + return None + + +def _get_text_input_value(body: dict, action_id: str) -> str | None: + """Return plain-text ``value`` for a view state action.""" + for aid, action_data in _iter_view_state_actions(body): + if aid == action_id: + return action_data.get("value") + return None + + +def _get_selected_conversation_or_option(body: dict, action_id: str) -> str | None: + """Return selected conversation ID, falling back to selected option value.""" + for aid, action_data in _iter_view_state_actions(body): + if aid == action_id: + return action_data.get("selected_conversation") or helpers.safe_get( + action_data, "selected_option", "value" + ) + return None + + def _sanitize_text(value: str, max_length: int = 100) -> str: """Strip and truncate user-supplied text to prevent oversized DB writes.""" if not value: diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 034bcc9..536f41c 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -11,7 +11,13 @@ import helpers from builders._common import _format_channel_ref, _get_group_members from db import DbManager, schemas -from handlers._common import _parse_private_metadata, _sanitize_text +from handlers._common import ( + _get_authorized_workspace, + _get_selected_conversation_or_option, + _get_selected_option_value, + _parse_private_metadata, + _sanitize_text, +) from slack import actions, orm from slack.blocks import context as block_context from slack.blocks import section @@ -95,10 +101,14 @@ def _build_publish_step2( if sync_mode == "direct" and other_members: ws_options: list[orm.SelectorOption] = [] - for m in other_members: - ws = helpers.get_workspace_by_id(m.workspace_id) - name = helpers.resolve_workspace_name(ws) if ws 
else f"Workspace {m.workspace_id}" - ws_options.append(orm.SelectorOption(name=name, value=str(m.workspace_id))) + for other_member in other_members: + other_workspace = helpers.get_workspace_by_id(other_member.workspace_id) + name = ( + helpers.resolve_workspace_name(other_workspace) + if other_workspace + else f"Workspace {other_member.workspace_id}" + ) + ws_options.append(orm.SelectorOption(name=name, value=str(other_member.workspace_id))) if ws_options: modal_blocks.append( @@ -123,10 +133,10 @@ def handle_publish_channel( context: dict, ) -> None: """Open the publish-channel flow — always starts with step 1 (sync mode selection).""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "publish_channel"}) + auth_result = _get_authorized_workspace(body, client, context, "publish_channel") + if not auth_result: return + _, workspace_record = auth_result trigger_id = helpers.safe_get(body, "trigger_id") raw_group_id = helpers.safe_get(body, "actions", 0, "value") @@ -166,7 +176,7 @@ def handle_publish_channel( callback_id=actions.CONFIG_PUBLISH_MODE_SUBMIT, title_text="Sync Channel", submit_button_text="Next", - parent_metadata={"group_id": group_id}, + parent_metadata={"group_id": group_id, "workspace_id": workspace_record.id}, new_or_add="new", ) @@ -178,35 +188,23 @@ def handle_publish_mode_submit( context: dict, ) -> None: """Handle step 1 submission: read the selected sync mode and show step 2.""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): + auth_result = _get_authorized_workspace(body, client, context, "publish_mode_submit") + if not auth_result: return + _, workspace_record = auth_result metadata = _parse_private_metadata(body) group_id = metadata.get("group_id") if not group_id: return - state_values = helpers.safe_get(body, "view", "state", 
"values") or {} - sync_mode = "group" - for _block_id, block_data in state_values.items(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_PUBLISH_SYNC_MODE: - selected = helpers.safe_get(action_data, "selected_option", "value") - if selected: - sync_mode = selected - - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + sync_mode = _get_selected_option_value(body, actions.CONFIG_PUBLISH_SYNC_MODE) or "group" other_members = [] - if workspace_record: - group_members = _get_group_members(group_id) - other_members = [m for m in group_members if m.workspace_id != workspace_record.id and m.workspace_id] - - if not workspace_record: - _logger.warning("handle_publish_mode_submit: no workspace_record") - return + group_members = _get_group_members(group_id) + other_members = [ + member for member in group_members if member.workspace_id != workspace_record.id and member.workspace_id + ] step2 = _build_publish_step2(client, group_id, sync_mode, other_members, workspace_record.id) updated_view = step2.as_ack_update( callback_id=actions.CONFIG_PUBLISH_CHANNEL_SUBMIT, @@ -228,15 +226,10 @@ def handle_publish_channel_submit( context: dict, ) -> None: """Create a Sync + SyncChannel for the publisher's channel, scoped to a group.""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "publish_channel_submit"}) - return - - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, "publish_channel_submit") + if not auth_result: return + _, workspace_record = auth_result metadata = _parse_private_metadata(body) group_id = 
metadata.get("group_id") @@ -245,31 +238,19 @@ def handle_publish_channel_submit( _logger.warning("publish_channel_submit: missing group_id in metadata") return - state_values = helpers.safe_get(body, "view", "state", "values") or {} - sync_mode = metadata.get("sync_mode", "group") target_workspace_id = None - - for _block_id, block_data in state_values.items(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_PUBLISH_DIRECT_TARGET: - selected_opt = helpers.safe_get(action_data, "selected_option", "value") - if selected_opt: - with contextlib.suppress(TypeError, ValueError): - target_workspace_id = int(selected_opt) + selected_target = _get_selected_option_value(body, actions.CONFIG_PUBLISH_DIRECT_TARGET) + if selected_target: + with contextlib.suppress(TypeError, ValueError): + target_workspace_id = int(selected_target) if sync_mode == "direct" and not target_workspace_id: sync_mode = "group" ack_fn = context.get("ack") - channel_id = None - for _block_id, block_data in state_values.items(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_PUBLISH_CHANNEL_SELECT: - channel_id = action_data.get("selected_conversation") or action_data.get("selected_option", {}).get( - "value" - ) + channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_PUBLISH_CHANNEL_SELECT) if not channel_id or channel_id == "__none__": if ack_fn: @@ -355,17 +336,11 @@ def handle_unpublish_channel( DB cascades remove all ``SyncChannel`` and ``PostMeta`` rows. Only the original publisher can unpublish. 
""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "unpublish_channel"}) + auth_result = _get_authorized_workspace(body, client, context, "unpublish_channel") + if not auth_result: return + user_id, workspace_record = auth_result - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "user", "team_id") - ) - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record) raw_value = helpers.safe_get(body, "actions", 0, "value") @@ -441,15 +416,10 @@ def _toggle_sync_status( _logger.warning(f"{log_event}_invalid_id", extra={"action_id": action_id}) return - user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "user", "team_id") - ) - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, log_event) + if not auth_result: return + user_id, workspace_record = auth_result admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record) all_channels = DbManager.find_records( @@ -597,10 +567,10 @@ def handle_stop_sync_confirm( Removes only this workspace's ``SyncChannel`` and its ``PostMeta``. Other workspaces' data and the Sync record remain intact. 
""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "stop_sync_confirm"}) + auth_result = _get_authorized_workspace(body, client, context, "stop_sync_confirm") + if not auth_result: return + user_id, workspace_record = auth_result meta = _parse_private_metadata(body) sync_id = meta.get("sync_id") @@ -608,11 +578,6 @@ def handle_stop_sync_confirm( _logger.warning("stop_sync_confirm: missing sync_id in metadata") return - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: - return - admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record) all_channels = DbManager.find_records( @@ -679,22 +644,13 @@ def handle_subscribe_channel( The channel list only shows channels that are not already in any sync (excluding already-synced and published-but-unsubscribed channels). 
""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "subscribe_channel"}) + auth_result = _get_authorized_workspace(body, client, context, "subscribe_channel") + if not auth_result: return + _, workspace_record = auth_result trigger_id = helpers.safe_get(body, "trigger_id") sync_id = helpers.safe_get(body, "actions", 0, "value") - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "user", "team_id") - ) - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None - if not workspace_record: - _logger.warning("handle_subscribe_channel: no workspace_record") - return blocks: list[orm.BaseBlock] = [] @@ -747,15 +703,10 @@ def handle_subscribe_channel_submit( context: dict, ) -> None: """Subscribe to an available channel sync: create SyncChannel for subscriber.""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "subscribe_channel_submit"}) - return - - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, "subscribe_channel_submit") + if not auth_result: return + user_id, workspace_record = auth_result metadata = _parse_private_metadata(body) sync_id = metadata.get("sync_id") @@ -764,14 +715,7 @@ def handle_subscribe_channel_submit( _logger.warning("subscribe_channel_submit: missing sync_id") return - state_values = helpers.safe_get(body, "view", "state", "values") or {} - channel_id = None - for _block_id, block_data in state_values.items(): - for action_id, action_data in block_data.items(): - 
if action_id == actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT: - channel_id = action_data.get("selected_conversation") or helpers.safe_get( - action_data, "selected_option", "value" - ) + channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT) if not channel_id or channel_id == "__none__": _logger.warning("subscribe_channel_submit: no channel selected") diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index 8bd80f5..e31e8ed 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -284,13 +284,13 @@ def _do_restore(data: dict, client: WebClient, user_id: str) -> None: _logger.exception("restore failed: %s", e) raise - for tid in team_ids: - ws = DbManager.find_records(schemas.Workspace, [schemas.Workspace.team_id == tid]) - if ws: + for team_id in team_ids: + workspace_rows = DbManager.find_records(schemas.Workspace, [schemas.Workspace.team_id == team_id]) + if workspace_rows: try: - builders.refresh_home_tab_for_workspace(ws[0], _logger) + builders.refresh_home_tab_for_workspace(workspace_rows[0], _logger) except Exception as e: - _logger.warning("_do_restore: failed to refresh home tab for %s: %s", tid, e) + _logger.warning("_do_restore: failed to refresh home tab for %s: %s", team_id, e) # --------------------------------------------------------------------------- diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py index 5ee4984..c9b2dca 100644 --- a/syncbot/handlers/groups.py +++ b/syncbot/handlers/groups.py @@ -12,6 +12,12 @@ import builders import helpers from db import DbManager, schemas +from handlers._common import ( + _get_authorized_workspace, + _get_selected_option_value, + _get_text_input_value, + _parse_private_metadata, +) from slack import actions, forms, orm from slack.blocks import context as block_context from slack.blocks import divider, section @@ -130,22 +136,12 @@ def handle_create_group_submit( context: dict, ) -> None: 
"""Create the workspace group and add this workspace as the creator.""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "create_group_submit"}) + auth_result = _get_authorized_workspace(body, client, context, "create_group_submit") + if not auth_result: return + user_id, workspace_record = auth_result - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: - return - - values = helpers.safe_get(body, "view", "state", "values") or {} - group_name = "" - for block_data in values.values(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_CREATE_GROUP_NAME: - group_name = (action_data.get("value") or "").strip() + group_name = (_get_text_input_value(body, actions.CONFIG_CREATE_GROUP_NAME) or "").strip() if not group_name: _logger.warning("create_group_submit: empty group name") @@ -234,15 +230,10 @@ def handle_join_group_submit( context: dict, ) -> None: """Validate an invite code and join the workspace group.""" - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_group_submit"}) - return - - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, "join_group_submit") + if not auth_result: return + user_id, workspace_record = auth_result form_data = forms.ENTER_GROUP_CODE_FORM.get_selected_values(body) raw_code = (helpers.safe_get(form_data, actions.CONFIG_JOIN_GROUP_CODE) or "").strip().upper() @@ -269,7 +260,14 @@ def handle_join_group_submit( if not groups: 
helpers._cache_set(rate_key, attempts + 1, ttl=900) - _logger.warning("group_code_invalid", extra={"code": raw_code}) + _logger.warning( + "group_code_invalid", + extra={ + "workspace_id": workspace_record.id, + "attempt": attempts + 1, + "code_length": len(raw_code), + }, + ) builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) return @@ -325,10 +323,10 @@ def handle_join_group_submit( schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, ], ) - for m in other_members: - if not m.workspace_id: + for other_member in other_members: + if not other_member.workspace_id: continue - member_ws = helpers.get_workspace_by_id(m.workspace_id) + member_ws = helpers.get_workspace_by_id(other_member.workspace_id) if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue try: @@ -339,7 +337,7 @@ def handle_join_group_submit( ) builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: - _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + _logger.warning(f"Failed to notify group member {other_member.workspace_id}: {e}") builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) @@ -358,9 +356,10 @@ def handle_invite_workspace( """Open a modal for inviting a workspace to a group.""" import constants - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): + auth_result = _get_authorized_workspace(body, client, context, "invite_workspace") + if not auth_result: return + _, workspace_record = auth_result trigger_id = helpers.safe_get(body, "trigger_id") raw_group_id = helpers.safe_get(body, "actions", 0, "value") @@ -374,8 +373,6 @@ def handle_invite_workspace( if not group: return - team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None 
current_workspace_id = workspace_record.id if workspace_record else None # Only active members count as "already in the group"; pending invites can be re-invited @@ -387,7 +384,7 @@ def handle_invite_workspace( schemas.WorkspaceGroupMember.deleted_at.is_(None), ], ) - member_ws_ids = {m.workspace_id for m in current_members if m.workspace_id} + member_ws_ids = {member.workspace_id for member in current_members if member.workspace_id} all_workspaces = DbManager.find_records( schemas.Workspace, @@ -409,7 +406,7 @@ def handle_invite_workspace( trigger_id=trigger_id, callback_id=actions.CONFIG_INVITE_WORKSPACE_SUBMIT, title_text="Oops!", - submit_button_text="None", + submit_button_text=None, new_or_add="new", ) return @@ -417,12 +414,12 @@ def handle_invite_workspace( modal_blocks: list = [] if eligible: - ws_options = [ + workspace_options = [ orm.SelectorOption( - name=helpers.resolve_workspace_name(ws), - value=str(ws.id), + name=helpers.resolve_workspace_name(workspace), + value=str(workspace.id), ) - for ws in eligible + for workspace in eligible ] modal_blocks.append( orm.InputBlock( @@ -430,7 +427,7 @@ def handle_invite_workspace( action=actions.CONFIG_INVITE_WORKSPACE_SELECT, element=orm.StaticSelectElement( placeholder="Select a Workspace", - options=ws_options, + options=workspace_options, ), optional=True, ) @@ -460,7 +457,7 @@ def handle_invite_workspace( ) ) - submit_text = "Send Invite" if eligible else "None" + submit_text = "Send Invite" if eligible else None view = orm.BlockView(blocks=modal_blocks) view.post_modal( client=client, @@ -480,22 +477,11 @@ def handle_invite_workspace_submit( context: dict, ) -> None: """Send a DM invite to admins of the selected workspace.""" - import json as _json - - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): + auth_result = _get_authorized_workspace(body, client, context, "invite_workspace_submit") + if not auth_result: return - - team_id = 
helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) - if not workspace_record: - return - - metadata = helpers.safe_get(body, "view", "private_metadata") - try: - meta = _json.loads(metadata) if metadata else {} - except (ValueError, TypeError): - meta = {} + user_id, workspace_record = auth_result + meta = _parse_private_metadata(body) group_id = meta.get("group_id") if not group_id: return @@ -504,14 +490,7 @@ def handle_invite_workspace_submit( if not group: return - values = helpers.safe_get(body, "view", "state", "values") or {} - selected_ws_id = None - for block_data in values.values(): - for action_id, action_data in block_data.items(): - if action_id == actions.CONFIG_INVITE_WORKSPACE_SELECT: - sel = action_data.get("selected_option") - if sel: - selected_ws_id = sel.get("value") + selected_ws_id = _get_selected_option_value(body, actions.CONFIG_INVITE_WORKSPACE_SELECT) if not selected_ws_id: return @@ -663,10 +642,10 @@ def handle_accept_group_invite( ], ) ws_name = helpers.resolve_workspace_name(workspace_record) - for m in other_members: - if not m.workspace_id: + for other_member in other_members: + if not other_member.workspace_id: continue - member_ws = helpers.get_workspace_by_id(m.workspace_id) + member_ws = helpers.get_workspace_by_id(other_member.workspace_id) if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue try: @@ -677,7 +656,7 @@ def handle_accept_group_invite( ) builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) except Exception as e: - _logger.warning(f"Failed to notify group member {m.workspace_id}: {e}") + _logger.warning(f"Failed to notify group member {other_member.workspace_id}: {e}") _logger.info( "group_invite_accepted", @@ -745,10 +724,10 @@ def handle_decline_group_invite( schemas.WorkspaceGroupMember.deleted_at.is_(None), ], ) - for m in all_members: - if not m.workspace_id: + for member in all_members: + if 
not member.workspace_id: continue - member_ws = helpers.get_workspace_by_id(m.workspace_id) + member_ws = helpers.get_workspace_by_id(member.workspace_id) if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue with contextlib.suppress(Exception): @@ -787,24 +766,24 @@ def _update_invite_dms( ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": new_text}}] for entry in entries: - ch = entry.get("channel") - ts = entry.get("ts") - if not ch or ts is None: + channel_id = entry.get("channel") + message_ts = entry.get("ts") + if not channel_id or message_ts is None: continue - ts_str = str(ts).strip() - if not ts_str: + message_ts_str = str(message_ts).strip() + if not message_ts_str: continue try: ws_client.chat_update( - channel=ch, - ts=ts_str, + channel=channel_id, + ts=message_ts_str, text=new_text, blocks=blocks, ) except Exception as e: _logger.warning( "_update_invite_dms: failed to update DM channel=%s ts=%s: %s", - ch, - ts_str, + channel_id, + message_ts_str, e, ) diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 0a6bb4b..fb83185 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -16,11 +16,11 @@ def _find_source_workspace_id(records: list[tuple], channel_id: str, ws_index: int = 1) -> int | None: """Return the workspace ID from the record whose channel matches *channel_id*.""" - for rec in records: - sc = rec[ws_index - 1] if ws_index > 1 else rec[0] - ws = rec[ws_index] - if sc.channel_id == channel_id: - return ws.id + for record in records: + sync_channel = record[ws_index - 1] if ws_index > 1 else record[0] + workspace = record[ws_index] + if sync_channel.channel_id == channel_id: + return workspace.id return None _logger = logging.getLogger(__name__) @@ -606,8 +606,13 @@ def _handle_reaction( message_ts=target_msg_ts, ) permalink = helpers.safe_get(plink_resp, "permalink") - except 
Exception: - pass + except Exception as exc: + # Permalink lookup is optional; if it fails we still post a + # reaction notice without the deep-link. + _logger.debug( + "reaction_permalink_lookup_failed", + extra={"channel_id": sync_channel.channel_id, "message_ts": target_msg_ts, "error": str(exc)}, + ) if permalink: msg_text = f"reacted with :{reaction}: to <{permalink}|this message>" @@ -679,7 +684,7 @@ def respond_to_message_event( if _is_own_bot_message(body, client, context): return - s3_photo_list, photo_blocks, direct_files = _build_file_context(body, client, logger) + photo_list, photo_blocks, direct_files = _build_file_context(body, client, logger) has_files = bool(photo_blocks or direct_files) if ( @@ -688,7 +693,7 @@ def respond_to_message_event( or (event_subtype == "file_share" and (ctx["msg_text"] != "" or has_files)) ): if not ctx["thread_ts"]: - _handle_new_post(body, client, logger, ctx, s3_photo_list, photo_blocks, direct_files) + _handle_new_post(body, client, logger, ctx, photo_list, photo_blocks, direct_files) else: _handle_thread_reply(body, client, logger, ctx, photo_blocks, direct_files) elif event_subtype == "message_changed": diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 1aad2b5..32cd763 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -397,7 +397,7 @@ def check_join_sync_channel( client=client, view_id=view_id, title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, ) elif len(sync_channel_records) == 0 and already_warning: blocks = [block for block in blocks if block["block_id"] != constants.WARNING_BLOCK] @@ -406,7 +406,7 @@ def check_join_sync_channel( client=client, view_id=view_id, title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, ) diff --git a/syncbot/handlers/tokens.py b/syncbot/handlers/tokens.py index 2b29a2b..7d80688 100644 --- 
a/syncbot/handlers/tokens.py +++ b/syncbot/handlers/tokens.py @@ -85,13 +85,13 @@ def handle_tokens_revoked( schemas.WorkspaceGroupMember.deleted_at.is_(None), ], ) - for m in group_members: - if not m.workspace_id or m.workspace_id in notified_ws: + for member in group_members: + if not member.workspace_id or member.workspace_id in notified_ws: continue - member_ws = helpers.get_workspace_by_id(m.workspace_id) + member_ws = helpers.get_workspace_by_id(member.workspace_id) if not member_ws or not member_ws.bot_token or member_ws.deleted_at: continue - notified_ws.add(m.workspace_id) + notified_ws.add(member.workspace_id) try: member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) @@ -109,7 +109,7 @@ def handle_tokens_revoked( schemas.SyncChannel, [ schemas.SyncChannel.sync_id == sync_channel.sync_id, - schemas.SyncChannel.workspace_id == m.workspace_id, + schemas.SyncChannel.workspace_id == member.workspace_id, schemas.SyncChannel.deleted_at.is_(None), ], ) @@ -123,7 +123,7 @@ def handle_tokens_revoked( f":double_vertical_bar: Syncing with *{ws_name}* has been paused because they uninstalled the app.", ) except Exception as e: - _logger.warning(f"handle_tokens_revoked: failed to notify member {m.workspace_id}: {e}") + _logger.warning(f"handle_tokens_revoked: failed to notify member {member.workspace_id}: {e}") _logger.info( "workspace_soft_deleted", diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py index c8847f0..6e1c74f 100644 --- a/syncbot/handlers/users.py +++ b/syncbot/handlers/users.py @@ -13,6 +13,7 @@ import helpers from builders._common import _get_group_members, _get_groups_for_workspace from db import DbManager, schemas +from handlers._common import _get_authorized_workspace _logger = logging.getLogger(__name__) @@ -88,12 +89,16 @@ def handle_user_profile_changed( notified_ws: set[int] = set() for group, _ in my_groups: members = _get_group_members(group.id) - for m in members: - if m.workspace_id and 
m.workspace_id != workspace_record.id and m.workspace_id not in notified_ws: - member_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + for member in members: + if ( + member.workspace_id + and member.workspace_id != workspace_record.id + and member.workspace_id not in notified_ws + ): + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) if member_ws: builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) - notified_ws.add(m.workspace_id) + notified_ws.add(member.workspace_id) _logger.info( "user_profile_updated", @@ -125,19 +130,10 @@ def handle_user_mapping_refresh( Uses content hash and cached blocks; when hash unchanged and within 60s cooldown, re-publishes with cooldown message. """ - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "user_mapping_refresh"}) - return - - team_id = ( - helpers.safe_get(body, "view", "team_id") - or helpers.safe_get(body, "team", "id") - or helpers.safe_get(body, "user", "team_id") - ) - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, "user_mapping_refresh") + if not auth_result: return + user_id, workspace_record = auth_result raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" try: @@ -178,20 +174,30 @@ def handle_user_mapping_refresh( member_clients: list[tuple[WebClient, int]] = [] - for m in members: - if not m.workspace_id or m.workspace_id == workspace_record.id: + for member in members: + if not member.workspace_id or member.workspace_id == workspace_record.id: continue try: - helpers._CACHE.pop(f"dir_refresh:{m.workspace_id}", None) - member_ws = helpers.get_workspace_by_id(m.workspace_id, context=context) + # Force a fresh directory pull before rematching. 
Cached directory + # snapshots can keep stale display names/emails after profile edits. + helpers._CACHE.pop(f"dir_refresh:{member.workspace_id}", None) + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) if member_ws and member_ws.bot_token: member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) - helpers._refresh_user_directory(member_client, m.workspace_id) - member_clients.append((member_client, m.workspace_id)) - helpers.seed_user_mappings(m.workspace_id, workspace_record.id, group_id=gid_opt) - helpers.seed_user_mappings(workspace_record.id, m.workspace_id, group_id=gid_opt) - except Exception: - pass + helpers._refresh_user_directory(member_client, member.workspace_id) + member_clients.append((member_client, member.workspace_id)) + helpers.seed_user_mappings(member.workspace_id, workspace_record.id, group_id=gid_opt) + helpers.seed_user_mappings(workspace_record.id, member.workspace_id, group_id=gid_opt) + except Exception as exc: + _logger.warning( + "user_mapping_refresh_member_sync_failed", + extra={ + "workspace_id": workspace_record.id, + "member_workspace_id": member.workspace_id, + "group_id": gid_opt, + "error": str(exc), + }, + ) helpers.run_auto_match_for_workspace(client, workspace_record.id) for member_client, member_ws_id in member_clients: @@ -221,15 +227,10 @@ def handle_user_mapping_edit_submit( """Save the per-user mapping edit and refresh the mapping screen.""" from handlers._common import _parse_private_metadata - user_id = helpers.get_user_id_from_body(body) - if not user_id or not helpers.is_user_authorized(client, user_id): - _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "user_mapping_edit_submit"}) - return - - team_id = helpers.safe_get(body, "view", "team_id") - workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None - if not workspace_record: + auth_result = _get_authorized_workspace(body, client, context, 
"user_mapping_edit_submit") + if not auth_result: return + user_id, workspace_record = auth_result meta = _parse_private_metadata(body) mapping_id = meta.get("mapping_id") diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index 78e88ab..e382ed0 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -63,7 +63,7 @@ def is_db_reset_visible_for_workspace(team_id: str | None) -> bool: Reads ENABLE_DB_RESET from os.environ at call time so it is correct even if .env was loaded after constants was first imported. """ - enabled = (os.environ.get("ENABLE_DB_RESET") or "").strip() + enabled = (os.environ.get(constants.ENABLE_DB_RESET) or "").strip() if not enabled: _logger.debug("DB reset button hidden: ENABLE_DB_RESET not set") return False diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py index 6d65599..bf08e90 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -13,9 +13,9 @@ from decimal import Decimal from typing import Any -import constants from sqlalchemy import text +import constants from db import DbManager, get_engine, schemas _logger = logging.getLogger(__name__) @@ -56,8 +56,8 @@ def _restore_raw_table(table_name: str, rows: list[dict]) -> None: else: parsed[key] = value - cols = ", ".join(f"`{k}`" for k in parsed.keys()) - placeholders = ", ".join(f":{k}" for k in parsed.keys()) + cols = ", ".join(f"`{k}`" for k in parsed) + placeholders = ", ".join(f":{k}" for k in parsed) conn.execute( text(f"INSERT INTO `{table_name}` ({cols}) VALUES ({placeholders})"), parsed, diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py index a12b0f8..809673a 100644 --- a/syncbot/helpers/user_matching.py +++ b/syncbot/helpers/user_matching.py @@ -511,8 +511,13 @@ def resolve_channel_references( try: info = source_client.conversations_info(channel=ch_id) ch_name = safe_get(info, "channel", "name") or ch_id - except Exception: - pass + except Exception as exc: + # 
If we cannot resolve channel metadata, keep the raw channel ID. + # This preserves message content without blocking sync processing. + _logger.debug( + "resolve_channel_reference_failed", + extra={"channel_id": ch_id, "error": str(exc)}, + ) if team_id and ch_name != ch_id: deep_link = f"https://slack.com/app_redirect?channel={ch_id}&team={team_id}" diff --git a/syncbot/helpers/workspace.py b/syncbot/helpers/workspace.py index 79c799b..b37dd31 100644 --- a/syncbot/helpers/workspace.py +++ b/syncbot/helpers/workspace.py @@ -351,8 +351,13 @@ def resolve_workspace_name(workspace: schemas.Workspace) -> str: ) workspace.workspace_name = name return name - except Exception: - pass + except Exception as exc: + # Name lookup is best-effort; falling back to team_id keeps UI usable + # even when Slack API calls fail intermittently. + _logger.debug( + "resolve_workspace_name_failed", + extra={"workspace_id": workspace.id, "team_id": workspace.team_id, "error": str(exc)}, + ) return workspace.team_id or f"Workspace {workspace.id}" diff --git a/syncbot/routing.py b/syncbot/routing.py index 6eed870..51d4ed6 100644 --- a/syncbot/routing.py +++ b/syncbot/routing.py @@ -62,7 +62,7 @@ """Event ``type`` -> handler.""" VIEW_MAPPER = { - actions.CONFIG_JOIN_SYNC_SUMBIT: handlers.handle_join_sync_submission, + actions.CONFIG_JOIN_SYNC_SUBMIT: handlers.handle_join_sync_submission, actions.CONFIG_NEW_SYNC_SUBMIT: handlers.handle_new_sync_submission, actions.CONFIG_USER_MAPPING_EDIT_SUBMIT: handlers.handle_user_mapping_edit_submit, actions.CONFIG_CREATE_GROUP_SUBMIT: handlers.handle_create_group_submit, diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py index 6d45b84..13c7bb5 100644 --- a/syncbot/slack/actions.py +++ b/syncbot/slack/actions.py @@ -26,7 +26,7 @@ CONFIG_JOIN_SYNC_CHANNEL_SELECT = "config_join_sync_channel_select" """Input: channel selector in the join-sync form (dispatches an action on change).""" -CONFIG_JOIN_SYNC_SUMBIT = "config_join_sync_submit" 
+CONFIG_JOIN_SYNC_SUBMIT = "config_join_sync_submit" """Callback: join-sync modal submitted.""" # --------------------------------------------------------------------------- @@ -166,7 +166,7 @@ """Action: user clicked Download backup in Backup/Restore modal.""" CONFIG_BACKUP_RESTORE_JSON_INPUT = "backup_restore_json_input" -"""Input: plain text area for restore JSON in Backup/Restore modal.""" +"""Input: uploaded JSON file in Backup/Restore modal.""" CONFIG_DATA_MIGRATION = "data_migration" """Action: user clicked "Data Migration" in External Connections (opens modal).""" @@ -184,7 +184,7 @@ """Action: user clicked Export in Data Migration modal.""" CONFIG_DATA_MIGRATION_JSON_INPUT = "data_migration_json_input" -"""Input: plain text area for migration import JSON.""" +"""Input: uploaded JSON file in Data Migration modal.""" # --------------------------------------------------------------------------- # External Connections (federation) actions @@ -219,7 +219,7 @@ """Action: user clicked "Reset Database" on the Home tab.""" CONFIG_DB_RESET_CONFIRM = "db_reset_confirm" +"""Callback: database reset confirmation view submitted.""" CONFIG_DB_RESET_PROCEED = "db_reset_proceed" -"""Action: danger button to confirm database reset.""" -"""Callback: user confirmed database reset in the warning modal.""" +"""Action: danger button to proceed with database reset.""" diff --git a/syncbot/slack/orm.py b/syncbot/slack/orm.py index cfda533..23d8515 100644 --- a/syncbot/slack/orm.py +++ b/syncbot/slack/orm.py @@ -507,7 +507,7 @@ def post_modal( trigger_id: str, title_text: str, callback_id: str, - submit_button_text: str = "Submit", + submit_button_text: str | None = "Submit", parent_metadata: dict = None, close_button_text: str = "Close", notify_on_close: bool = False, @@ -526,7 +526,7 @@ def post_modal( if parent_metadata: view["private_metadata"] = json.dumps(parent_metadata) - if submit_button_text != "None": # TODO: would prefer this to use None instead of "None" + if 
submit_button_text: view["submit"] = {"type": "plain_text", "text": submit_button_text} try: @@ -535,8 +535,11 @@ def post_modal( elif new_or_add == "add": client.views_push(trigger_id=trigger_id, view=view) except Exception as e: - logger.error(f"Failed to open/push modal view: {e}") - logger.debug(f"View payload: {json.dumps(view, indent=2)}") + logger.error( + "modal_open_or_push_failed", + extra={"callback_id": callback_id, "mode": new_or_add, "error": str(e)}, + ) + logger.debug("modal_view_payload", extra={"view": json.dumps(view, indent=2)}) def publish_home_tab(self, client: Any, user_id: str): """Publish a Home tab view for the given user.""" @@ -552,7 +555,7 @@ def update_modal( view_id: str, title_text: str, callback_id: str, - submit_button_text: str = "Submit", + submit_button_text: str | None = "Submit", parent_metadata: dict = None, close_button_text: str = "Close", notify_on_close: bool = False, @@ -567,7 +570,7 @@ def update_modal( "notify_on_close": notify_on_close, "blocks": blocks, } - if submit_button_text != "None": + if submit_button_text: view["submit"] = {"type": "plain_text", "text": submit_button_text} if parent_metadata: view["private_metadata"] = json.dumps(parent_metadata) diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py new file mode 100644 index 0000000..e16f9dc --- /dev/null +++ b/tests/test_channel_sync_handlers.py @@ -0,0 +1,110 @@ +"""Focused unit tests for channel sync handler branches.""" + +import os +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.channel_sync import ( # noqa: E402 + handle_publish_channel_submit, + handle_subscribe_channel_submit, +) + + +class 
TestPublishChannelSubmit: + def test_missing_group_id_exits_early(self): + client = MagicMock() + logger = MagicMock() + context = {"ack": MagicMock()} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_publish_channel_submit({}, client, logger, context) + + context["ack"].assert_not_called() + create_record.assert_not_called() + + def test_missing_channel_selection_returns_ack_error(self): + client = MagicMock() + logger = MagicMock() + ack = MagicMock() + context = {"ack": ack} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"group_id": 7}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="__none__"), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_publish_channel_submit({}, client, logger, context) + + ack.assert_called_once() + kwargs = ack.call_args.kwargs + assert kwargs["response_action"] == "errors" + assert "Select a Channel to publish." 
in kwargs["errors"].values() + create_record.assert_not_called() + + def test_existing_sync_channel_returns_ack_error(self): + client = MagicMock() + logger = MagicMock() + ack = MagicMock() + context = {"ack": ack} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"group_id": 7}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="C123"), + patch("handlers.channel_sync.DbManager.find_records", return_value=[object()]), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_publish_channel_submit({}, client, logger, context) + + ack.assert_called_once() + kwargs = ack.call_args.kwargs + assert kwargs["response_action"] == "errors" + assert "already being synced" in next(iter(kwargs["errors"].values())) + create_record.assert_not_called() + + +class TestSubscribeChannelSubmit: + def test_missing_sync_id_exits_early(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_subscribe_channel_submit({}, client, logger, context) + + create_record.assert_not_called() + + def test_missing_channel_selection_exits_early(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"sync_id": 55}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="__none__"), + 
patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_subscribe_channel_submit({}, client, logger, context) + + create_record.assert_not_called() diff --git a/tests/test_db.py b/tests/test_db.py index 9408c5e..9b9616e 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -1,4 +1,4 @@ -"""Unit tests for syncbot.utils.db connection pooling and retry logic.""" +"""Unit tests for ``syncbot/db`` connection pooling and retry logic.""" import os from unittest.mock import patch diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py new file mode 100644 index 0000000..c7828ba --- /dev/null +++ b/tests/test_export_import_handlers.py @@ -0,0 +1,50 @@ +"""Focused unit tests for backup/restore and migration handler validation.""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.export_import import handle_backup_restore_submit # noqa: E402 +from slack import actions # noqa: E402 + + +class TestBackupRestoreSubmitValidation: + def test_returns_error_when_file_missing(self): + client = MagicMock() + logger = MagicMock() + body = {"user": {"id": "U1"}, "view": {"state": {"values": {}}}} + + with patch("handlers.export_import._is_admin", return_value=True): + resp = handle_backup_restore_submit(body, client, logger, context={}) + + assert resp["response_action"] == "errors" + assert actions.CONFIG_BACKUP_RESTORE_JSON_INPUT in resp["errors"] + + def test_returns_error_when_uploaded_file_has_no_url(self): + client = MagicMock() + logger = MagicMock() + body = { + "user": {"id": "U1"}, + "view": { + "state": { + "values": { + actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: { + actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: { + 
"files": [{"id": "F123"}], + } + } + } + } + }, + } + + with patch("handlers.export_import._is_admin", return_value=True): + resp = handle_backup_restore_submit(body, client, logger, context={}) + + assert resp["response_action"] == "errors" + assert "Could not retrieve the uploaded file." in resp["errors"][actions.CONFIG_BACKUP_RESTORE_JSON_INPUT] diff --git a/tests/test_groups_handlers.py b/tests/test_groups_handlers.py new file mode 100644 index 0000000..1864b67 --- /dev/null +++ b/tests/test_groups_handlers.py @@ -0,0 +1,48 @@ +"""Focused unit tests for group handler edge branches.""" + +import os +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("ADMIN_DATABASE_USER", "root") +os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") +os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.groups import handle_join_group_submit # noqa: E402 + + +class TestJoinGroupSubmit: + def test_invalid_group_code_log_is_sanitized(self): + client = MagicMock() + logger = MagicMock() + workspace = SimpleNamespace(id=42) + + body = { + "user": {"id": "U1"}, + "view": {"state": {"values": {}}}, + } + + with ( + patch("handlers.groups._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.groups.forms.ENTER_GROUP_CODE_FORM.get_selected_values", return_value={}), + patch("handlers.groups.helpers._cache_get", return_value=0), + patch("handlers.groups.helpers._cache_set"), + patch("handlers.groups.DbManager.find_records", return_value=[]), + patch("handlers.groups.builders.refresh_home_tab_for_workspace"), + patch("handlers.groups._logger.warning") as warn_log, + ): + handle_join_group_submit(body, client, logger, context={}) + + matched = [ + call + for call in warn_log.call_args_list + if call.args and call.args[0] == "group_code_invalid" + ] + assert matched, "Expected 
group_code_invalid warning" + extra = matched[0].kwargs["extra"] + assert "code" not in extra + assert extra["workspace_id"] == workspace.id + assert extra["attempt"] == 1 + assert "code_length" in extra diff --git a/tests/test_handlers.py b/tests/test_handlers.py index dd4edce..4a1109a 100644 --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -1,4 +1,4 @@ -"""Unit tests for syncbot.utils.handlers event parsing and dispatch.""" +"""Unit tests for handler parsing and dispatch helpers.""" import os from unittest.mock import MagicMock, patch diff --git a/tests/test_helpers.py b/tests/test_helpers.py index 2b1e4b4..8700339 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -1,4 +1,4 @@ -"""Unit tests for syncbot.utils.helpers core utility functions.""" +"""Unit tests for helper utilities under ``syncbot/helpers``.""" import os import time From 928737ab21b7b295301ff1e74e1ebf92ccb47a93 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 13 Mar 2026 22:00:59 -0500 Subject: [PATCH 08/45] Updated infra deployment to work with test and prod. 
--- .github/workflows/sam-pipeline.yml | 7 +++++-- README.md | 2 +- docs/DEPLOYMENT.md | 15 +++++++++------ samconfig.toml | 6 +++--- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/.github/workflows/sam-pipeline.yml b/.github/workflows/sam-pipeline.yml index 2e83638..2b20844 100644 --- a/.github/workflows/sam-pipeline.yml +++ b/.github/workflows/sam-pipeline.yml @@ -1,7 +1,8 @@ on: push: branches: - - main + - test + - prod jobs: sam-build: @@ -29,6 +30,7 @@ jobs: path: './.aws-sam/build' sam-deploy-test: + if: github.ref == 'refs/heads/test' runs-on: ubuntu-latest environment: test needs: sam-build @@ -69,9 +71,10 @@ jobs: PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }}" sam-deploy-prod: + if: github.ref == 'refs/heads/prod' runs-on: ubuntu-latest environment: prod - needs: [sam-build, sam-deploy-test] + needs: sam-build steps: - uses: aws-actions/configure-aws-credentials@v4 with: diff --git a/README.md b/README.md index a484be9..130c8f7 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ mysql -h -u -p syncbot < db/init.sql ```bash sam build --use-container -sam deploy # staging (default profile) +sam deploy # test (default profile) sam deploy --config-env prod # production profile ``` diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 87e19ff..66c4a1c 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -27,11 +27,13 @@ mysql -h -u -p syncbot < db/init.sql ## CI/CD via GitHub Actions -Pushes to `main` automatically build and deploy via `.github/workflows/sam-pipeline.yml`: +Pushes to deployment branches automatically build and deploy via `.github/workflows/sam-pipeline.yml`: 1. **Build** — `sam build --use-container` -2. **Deploy to test** — automatic -3. **Deploy to prod** — requires manual approval (configure in GitHub environment settings) +2. **Deploy to test** — automatic on push to `test` +3. 
**Deploy to prod** — automatic on push to `prod` (can require manual approval via GitHub environment settings) + +`main` can remain an upstream-sync branch and does not deploy. ### One-Time Setup @@ -62,14 +64,15 @@ aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 |----------|-------------|-------------| | `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | | `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | -| `STAGE_NAME` | `staging` | `prod` | +| `STAGE_NAME` | `test` | `prod` | ### Deploy Flow -Once configured, merge or push to `main` and the pipeline runs: +Once configured, push to deployment branches and the pipeline runs: ``` -push to main → sam build → deploy to test → (manual approval) → deploy to prod +push to test → sam build → deploy to test +push to prod → sam build → (manual approval, optional) → deploy to prod ``` Monitor progress in your repo's **Actions** tab. The first deploy creates the CloudFormation stack (VPC, RDS, Lambda, API Gateway). SAM uses the deployment bucket only for packaging; the app stores OAuth and data in RDS and uploads media directly to Slack. diff --git a/samconfig.toml b/samconfig.toml index 480a2ed..145cbd0 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -10,13 +10,13 @@ version = 0.1 use_container = true [default.deploy.parameters] -stack_name = "syncbot-staging" +stack_name = "syncbot-test" resolve_s3 = true -s3_prefix = "syncbot-staging" +s3_prefix = "syncbot-test" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=staging" +parameter_overrides = "Stage=test" [prod.deploy.parameters] stack_name = "syncbot-prod" From 0d1e88600883aa632f9a533ae61754026204e63d Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 13 Mar 2026 22:44:07 -0500 Subject: [PATCH 09/45] Added support for SQL migrations. 
--- README.md | 12 +-- db/init.sql | 4 - db/migrations/README.md | 23 ++++++ docs/DEPLOYMENT.md | 6 +- docs/IMPROVEMENTS.md | 5 +- syncbot/app.py | 2 + syncbot/db/__init__.py | 169 +++++++++++++++++++++++++++++++++++----- 7 files changed, 186 insertions(+), 35 deletions(-) create mode 100644 db/migrations/README.md diff --git a/README.md b/README.md index 130c8f7..d3c2674 100644 --- a/README.md +++ b/README.md @@ -64,11 +64,7 @@ sam deploy --guided You'll be prompted for parameters like `DatabaseUser`, `DatabasePassword`, `SlackSigningSecret`, `SlackClientId`, `SlackClientSecret`, `EncryptionKey`, and `AllowedDBCidr`. These are stored as CloudFormation parameters (secrets use `NoEcho`). -3. **Initialize the database** — after the stack creates the RDS instance, grab the endpoint from the CloudFormation outputs and run: - -```bash -mysql -h -u -p syncbot < db/init.sql -``` +3. **Auto-initialize check** — on first startup, SyncBot now creates the database schema and applies pending SQL migrations automatically. No manual `mysql < db/init.sql` step is required. 4. **Update your Slack app URLs** to point at the API Gateway endpoint shown in the stack outputs (e.g., `https://xxxxx.execute-api.us-east-2.amazonaws.com/Prod/slack/events`). @@ -80,7 +76,7 @@ sam deploy # test (default profile) sam deploy --config-env prod # production profile ``` -The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. +The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. Each deploy automatically runs DB bootstrap/migrations during app startup. > For shared infrastructure, CI/CD setup, and advanced deployment options, see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md). 
@@ -192,7 +188,6 @@ docker run -d --name syncbot-db \ -e MYSQL_DATABASE=syncbot \ -p 3306:3306 \ mysql:8 -mysql -h 127.0.0.1 -u root -prootpass syncbot < db/init.sql ``` Configure and run: @@ -276,7 +271,8 @@ syncbot/ │ ├── federation/ # Cross-instance sync (opt-in) │ ├── db/ # Engine, session, ORM models │ └── slack/ # Action IDs, forms, Block Kit helpers -├── db/init.sql # Database schema +├── db/init.sql # Baseline schema for fresh databases +├── db/migrations/ # Forward-only SQL migrations (auto-applied) ├── tests/ # pytest unit tests ├── docs/ # Extended documentation ├── template.yaml # AWS SAM infrastructure-as-code diff --git a/db/init.sql b/db/init.sql index 6575d53..dd6555a 100644 --- a/db/init.sql +++ b/db/init.sql @@ -1,6 +1,5 @@ -- SyncBot Database Schema -- Run this to initialize a fresh database with all tables. --- Pre-release: all schema changes are maintained here; no separate migration scripts. -- -- Usage: -- mysql -h -u -p < db/init.sql @@ -61,7 +60,6 @@ CREATE TABLE IF NOT EXISTS workspace_group_members ( FOREIGN KEY (invited_by_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL, UNIQUE KEY uq_group_workspace (group_id, workspace_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; --- If upgrading an existing DB, run: ALTER TABLE workspace_group_members ADD COLUMN invited_by_slack_user_id VARCHAR(32) DEFAULT NULL, ADD COLUMN invited_by_workspace_id INT DEFAULT NULL, ADD CONSTRAINT fk_wgm_invited_by FOREIGN KEY (invited_by_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL; CREATE TABLE IF NOT EXISTS syncs ( id INT AUTO_INCREMENT PRIMARY KEY, @@ -126,7 +124,6 @@ CREATE TABLE IF NOT EXISTS user_mappings ( UNIQUE KEY uq_source_target (source_workspace_id, source_user_id, target_workspace_id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; --- Slack OAuth persistence tables (for SQLAlchemyInstallationStore / SQLAlchemyOAuthStateStore) CREATE TABLE IF NOT EXISTS slack_bots ( id INT AUTO_INCREMENT PRIMARY KEY, client_id VARCHAR(32) NOT NULL, @@ 
-180,7 +177,6 @@ CREATE TABLE IF NOT EXISTS slack_oauth_states ( expire_at DATETIME NOT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; --- Indexes CREATE INDEX idx_sync_channels_channel_id ON sync_channels(channel_id); CREATE INDEX idx_sync_channels_sync_id ON sync_channels(sync_id); CREATE INDEX idx_sync_channels_workspace_id ON sync_channels(workspace_id); diff --git a/db/migrations/README.md b/db/migrations/README.md new file mode 100644 index 0000000..0ba2a4c --- /dev/null +++ b/db/migrations/README.md @@ -0,0 +1,23 @@ +# Database Migrations + +SyncBot now applies SQL migrations automatically during app startup. + +## How it works + +- `syncbot/db/__init__.py` ensures the DB exists. +- It applies `db/init.sql` once for new databases. +- It records applied versions in `schema_migrations`. +- It applies pending `*.sql` files in this folder in filename sort order. + +## Naming convention + +Use lexicographically sortable prefixes: + +- `001_add_new_table.sql` +- `002_add_index_for_lookup.sql` + +Keep migrations: + +- idempotent when practical +- forward-only (never rewrite old files) +- focused (one change objective per file) diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 66c4a1c..17437c4 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -16,11 +16,11 @@ sam deploy --guided \ ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com ``` -Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). Create the schema and initialize the tables on the existing instance: +Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). SyncBot now auto-creates the schema and initializes/updates tables at startup, so there is no manual `init.sql` step. 
```bash +# Optional one-time validation if you want to pre-create schema manually: mysql -h -u -p -e "CREATE DATABASE IF NOT EXISTS syncbot;" -mysql -h -u -p syncbot < db/init.sql ``` **What about API Gateway and Lambda?** Each stack always creates its own API Gateway and Lambda function. These are lightweight resources that don't affect free-tier billing — the free tier quotas (1M API calls, 1M Lambda requests) are shared across your entire account regardless of how many gateways or functions you have. If you want a unified domain across apps, put a CloudFront distribution or API Gateway custom domain in front. @@ -75,6 +75,6 @@ push to test → sam build → deploy to test push to prod → sam build → (manual approval, optional) → deploy to prod ``` -Monitor progress in your repo's **Actions** tab. The first deploy creates the CloudFormation stack (VPC, RDS, Lambda, API Gateway). SAM uses the deployment bucket only for packaging; the app stores OAuth and data in RDS and uploads media directly to Slack. +Monitor progress in your repo's **Actions** tab. The first deploy creates the CloudFormation stack (VPC, RDS, Lambda, API Gateway). SAM uses the deployment bucket only for packaging; the app stores OAuth and data in RDS and uploads media directly to Slack. On cold start, SyncBot also applies DB bootstrap/migrations automatically. > **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md index b42ff1f..b33db6b 100644 --- a/docs/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -450,7 +450,8 @@ This document outlines the improvements made to the SyncBot application and addi - Review and update other dependencies 2. 
**Database Migrations** - - Currently using `db/init.sql` as the single source of truth (pre-release); consider adopting Alembic for formal migration management post-release + - Startup now auto-bootstraps schema and applies ordered SQL files from `db/migrations/` (tracked in `schema_migrations`) + - Consider adopting Alembic in the future if you want model-autogenerated migrations and down-revision support 3. **Advanced Testing** - Add integration tests for database operations @@ -484,4 +485,4 @@ This document outlines the improvements made to the SyncBot application and addi - Duplicated code has been consolidated into shared helpers throughout handlers and federation modules - Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching keeps builds lightweight, and cross-workspace refreshes use `context=None` to prevent cache contamination - Variable naming follows a consistent domain-model convention: `member_ws`/`member_client` for group members, `sync_channel` for ORM records, `slack_channel` for raw API dicts -- Pre-release schema management uses `db/init.sql` as the single source of truth (no separate migration scripts) +- Schema bootstrap + migration application is automatic at startup (`db/init.sql` baseline + `db/migrations/*.sql`) diff --git a/syncbot/app.py b/syncbot/app.py index 05df11f..81da070 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -31,6 +31,7 @@ LOCAL_DEVELOPMENT, validate_config, ) +from db import initialize_database from federation.api import dispatch_federation_request from helpers import get_oauth_flow, get_request_type, safe_get from logger import ( @@ -79,6 +80,7 @@ def _redact_sensitive(obj, _depth=0): configure_logging() validate_config() +initialize_database() app = App( process_before_response=not LOCAL_DEVELOPMENT, diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 83480ed..0093e0c 100644 --- 
a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -13,12 +13,13 @@ import logging import os import ssl +import time from dataclasses import dataclass from pathlib import Path from typing import TypeVar from urllib.parse import quote_plus -from sqlalchemy import and_, create_engine, func, pool, text +from sqlalchemy import and_, create_engine, func, inspect, pool, text from sqlalchemy.exc import OperationalError from sqlalchemy.orm import sessionmaker @@ -40,6 +41,13 @@ class DatabaseField: # Maximum number of times to retry a DB operation on a transient connection error _MAX_RETRIES = 2 +_DB_INIT_MAX_ATTEMPTS = 15 +_DB_INIT_RETRY_SECONDS = 2 +_MIGRATION_TABLE = "schema_migrations" +_BASELINE_VERSION = "000_init_sql" +_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent +_INIT_SQL_PATH = _PROJECT_ROOT / "db" / "init.sql" +_MIGRATIONS_DIR = _PROJECT_ROOT / "db" / "migrations" def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: @@ -61,6 +69,144 @@ def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: return db_url, connect_args +def _sql_statements_from_file(sql_path: Path) -> list[str]: + """Parse a SQL file into executable statements. + + This parser intentionally supports the project's migration style + (line comments + semicolon-delimited statements). 
+ """ + sql = sql_path.read_text() + lines = [] + for line in sql.splitlines(): + if "--" in line: + line = line[: line.index("--")].strip() + else: + line = line.strip() + if line: + lines.append(line) + combined = " ".join(lines) + return [stmt.strip() for stmt in combined.split(";") if stmt.strip()] + + +def _execute_sql_file(conn, sql_path: Path) -> None: + """Execute all statements from *sql_path* using the provided connection.""" + for stmt in _sql_statements_from_file(sql_path): + conn.execute(text(stmt)) + + +def _ensure_migration_table(engine) -> None: + """Create the migration tracking table if it does not exist.""" + with engine.begin() as conn: + conn.execute( + text( + """ + CREATE TABLE IF NOT EXISTS schema_migrations ( + version VARCHAR(255) PRIMARY KEY, + applied_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 + """ + ) + ) + + +def _migration_applied(engine, version: str) -> bool: + with engine.begin() as conn: + row = conn.execute( + text("SELECT version FROM schema_migrations WHERE version = :version"), + {"version": version}, + ).first() + return row is not None + + +def _record_migration(engine, version: str) -> None: + with engine.begin() as conn: + conn.execute( + text( + """ + INSERT INTO schema_migrations (version) + VALUES (:version) + ON DUPLICATE KEY UPDATE version = VALUES(version) + """ + ), + {"version": version}, + ) + + +def _table_exists(engine, table_name: str) -> bool: + """Return True if *table_name* exists in the current schema.""" + return inspect(engine).has_table(table_name) + + +def _ensure_database_exists() -> None: + """Create the configured schema if it does not already exist.""" + schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") + url_no_db, connect_args = _build_base_url(include_schema=False) + engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) + try: + with engine_no_db.begin() as conn: + conn.execute(text(f"CREATE DATABASE 
IF NOT EXISTS `{schema}` CHARACTER SET utf8mb4")) + finally: + engine_no_db.dispose() + + +def initialize_database() -> None: + """Initialize schema and apply migrations automatically. + + Behavior: + - Ensures the target database exists. + - Creates migration tracking table. + - Applies ``db/init.sql`` exactly once for fresh databases (or marks it as + baseline for already-initialized databases). + - Applies pending SQL migrations from ``db/migrations`` in filename order. + """ + if not _INIT_SQL_PATH.exists(): + raise FileNotFoundError(f"Missing init.sql at {_INIT_SQL_PATH}") + + for attempt in range(1, _DB_INIT_MAX_ATTEMPTS + 1): + try: + _ensure_database_exists() + engine = get_engine() + + _ensure_migration_table(engine) + + if not _migration_applied(engine, _BASELINE_VERSION): + if _table_exists(engine, "workspaces"): + _logger.info("db_init_baseline_marked", extra={"version": _BASELINE_VERSION}) + _record_migration(engine, _BASELINE_VERSION) + else: + _logger.info("db_init_start", extra={"file": str(_INIT_SQL_PATH)}) + with engine.begin() as conn: + _execute_sql_file(conn, _INIT_SQL_PATH) + _record_migration(engine, _BASELINE_VERSION) + _logger.info("db_init_complete", extra={"version": _BASELINE_VERSION}) + + if _MIGRATIONS_DIR.exists(): + migration_files = sorted(p for p in _MIGRATIONS_DIR.glob("*.sql") if p.is_file()) + for migration_file in migration_files: + version = migration_file.name + if _migration_applied(engine, version): + continue + _logger.info("db_migration_start", extra={"version": version}) + with engine.begin() as conn: + _execute_sql_file(conn, migration_file) + _record_migration(engine, version) + _logger.info("db_migration_complete", extra={"version": version}) + + return + except Exception as exc: + if attempt >= _DB_INIT_MAX_ATTEMPTS: + _logger.error( + "db_init_failed", + extra={"attempts": _DB_INIT_MAX_ATTEMPTS, "error": str(exc)}, + ) + raise + _logger.warning( + "db_init_retrying", + extra={"attempt": attempt, "max_attempts": 
_DB_INIT_MAX_ATTEMPTS, "error": str(exc)}, + ) + time.sleep(_DB_INIT_RETRY_SECONDS) + + def drop_and_init_db() -> None: """Drop the database and reinitialize from db/init.sql. All data is lost. @@ -86,35 +232,22 @@ def drop_and_init_db() -> None: url_with_db, connect_args = _build_base_url(include_schema=True) engine_with_db = create_engine(url_with_db, connect_args=connect_args, pool_pre_ping=True) - init_path = Path(__file__).resolve().parent.parent.parent / "db" / "init.sql" + init_path = _INIT_SQL_PATH if not init_path.exists(): _logger.error("drop_and_init_db: init.sql not found at %s", init_path) engine_with_db.dispose() return - sql = init_path.read_text() - # Strip line comments and split into statements - lines = [] - for line in sql.splitlines(): - if "--" in line: - line = line[: line.index("--")].strip() - else: - line = line.strip() - if line: - lines.append(line) - combined = " ".join(lines) - statements = [s.strip() for s in combined.split(";") if s.strip()] - with engine_with_db.begin() as conn: - for stmt in statements: - if stmt: - conn.execute(text(stmt)) + _execute_sql_file(conn, init_path) engine_with_db.dispose() GLOBAL_ENGINE = None GLOBAL_SESSION = None GLOBAL_SCHEMA = None + # Ensure baseline is re-recorded after reset. + initialize_database() _logger.info("drop_and_init_db: database %s dropped and reinitialized from init.sql", schema) From c1120978a15f7841b33c36416ed39709c7b3c9ae Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 14 Mar 2026 00:00:11 -0500 Subject: [PATCH 10/45] Update to GitHub deployment flow. 
--- .github/workflows/sam-pipeline.yml | 6 ++++++ docs/DEPLOYMENT.md | 18 +++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sam-pipeline.yml b/.github/workflows/sam-pipeline.yml index 2b20844..e30c716 100644 --- a/.github/workflows/sam-pipeline.yml +++ b/.github/workflows/sam-pipeline.yml @@ -65,6 +65,9 @@ jobs: --force-upload \ --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ + ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ + DatabaseUser=${{ vars.DATABASE_USER }} \ + DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ @@ -106,6 +109,9 @@ jobs: --force-upload \ --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ + ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ + DatabaseUser=${{ vars.DATABASE_USER }} \ + DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 17437c4..edb4a61 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -37,7 +37,17 @@ Pushes to deployment branches automatically build and deploy via `.github/workfl ### One-Time Setup -1. **Create an IAM user** for deployments with permissions for CloudFormation, Lambda, API Gateway, S3 (for deploy artifacts only), IAM, and RDS. Generate an access key pair. +1. **Create an IAM user and access key pair** for deployments with permissions for: +``` +AWSCloudFormationFullAccess +AmazonAPIGatewayAdministrator +AWSLambda_FullAccess +AmazonRDSFullAccess +AmazonEC2FullAccess +CloudWatchFullAccess +AmazonS3FullAccess +IAMFullAccess +``` 2. 
**Create a SAM deployment bucket** — SAM uploads the Lambda package to an S3 bucket during deploy (packaging only; the app does not use S3 at runtime): @@ -65,6 +75,12 @@ aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 | `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | | `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | | `STAGE_NAME` | `test` | `prod` | +| `EXISTING_DATABASE_HOST` | `mydb.xxxx.us-east-2.rds.amazonaws.com` | `mydb.xxxx.us-east-2.rds.amazonaws.com` | +| `DATABASE_USER` | `syncbot_user` | `syncbot_user` | +| `DATABASE_SCHEMA` | `syncbot_test` | `syncbot_prod` | + +`EXISTING_DATABASE_HOST` tells SAM to skip creating VPC/RDS resources and point Lambda at your existing RDS endpoint. Use different `DATABASE_SCHEMA` values per environment when sharing one RDS instance. +If you want SAM to create a new RDS per environment instead, leave `EXISTING_DATABASE_HOST` empty. ### Deploy Flow From 5efee538f2bf2d89a29efe00d36a62fdf116c9f7 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sun, 15 Mar 2026 22:10:38 -0500 Subject: [PATCH 11/45] Large changes to make repo more fork-friendly. Switched to Alembic for migrations. Implemented ORM with SQLAlchemy. Made IaC work for multiple providers (default to AWS and MySQL). Added stubs for GCP and SQLite.
--- .devcontainer/docker-compose.dev.yml | 13 +- .env.example | 24 +- .../{sam-pipeline.yml => deploy-aws.yml} | 32 +- .github/workflows/deploy-gcp.yml | 69 ++++ .gitignore | 2 + Dockerfile | 11 +- README.md | 34 +- db/alembic.ini | 45 +++ db/alembic/env.py | 61 ++++ db/alembic/script.py.mako | 26 ++ db/alembic/versions/001_baseline.py | 89 +++++ db/init.sql | 195 ----------- db/migrations/README.md | 23 -- docker-compose.yml | 15 +- docs/ARCHITECTURE.md | 4 +- docs/BACKUP_AND_MIGRATION.md | 2 +- docs/DEPLOYMENT.md | 234 +++++++++---- docs/IMPROVEMENTS.md | 16 +- docs/INFRA_CONTRACT.md | 112 ++++++ infra/aws/scripts/print-bootstrap-outputs.sh | 31 ++ infra/aws/template.bootstrap.yaml | 264 +++++++++++++++ template.yaml => infra/aws/template.yaml | 51 +-- infra/gcp/README.md | 72 ++++ infra/gcp/main.tf | 319 ++++++++++++++++++ infra/gcp/outputs.tf | 44 +++ infra/gcp/scripts/print-bootstrap-outputs.sh | 35 ++ infra/gcp/variables.tf | 136 ++++++++ poetry.lock | 314 ++++++++--------- pyproject.toml | 1 + samconfig.toml | 1 + syncbot/constants.py | 75 +++- syncbot/db/__init__.py | 288 +++++++--------- syncbot/db/schemas.py | 2 +- syncbot/handlers/sync.py | 2 +- syncbot/helpers/encryption.py | 8 +- syncbot/helpers/export_import.py | 46 +-- syncbot/helpers/notifications.py | 2 +- syncbot/helpers/oauth.py | 4 +- syncbot/requirements.txt | 2 - tests/test_db.py | 88 ++++- tests/test_helpers.py | 12 +- 41 files changed, 2046 insertions(+), 758 deletions(-) rename .github/workflows/{sam-pipeline.yml => deploy-aws.yml} (79%) create mode 100644 .github/workflows/deploy-gcp.yml create mode 100644 db/alembic.ini create mode 100644 db/alembic/env.py create mode 100644 db/alembic/script.py.mako create mode 100644 db/alembic/versions/001_baseline.py delete mode 100644 db/init.sql delete mode 100644 db/migrations/README.md create mode 100644 docs/INFRA_CONTRACT.md create mode 100755 infra/aws/scripts/print-bootstrap-outputs.sh create mode 100644 infra/aws/template.bootstrap.yaml 
rename template.yaml => infra/aws/template.yaml (88%) create mode 100644 infra/gcp/README.md create mode 100644 infra/gcp/main.tf create mode 100644 infra/gcp/outputs.tf create mode 100755 infra/gcp/scripts/print-bootstrap-outputs.sh create mode 100644 infra/gcp/variables.tf diff --git a/.devcontainer/docker-compose.dev.yml b/.devcontainer/docker-compose.dev.yml index 0f6e82d..c3cbd6f 100644 --- a/.devcontainer/docker-compose.dev.yml +++ b/.devcontainer/docker-compose.dev.yml @@ -9,7 +9,6 @@ services: - "3306:3306" volumes: - syncbot-db:/var/lib/mysql - - ../db/init.sql:/docker-entrypoint-initdb.d/01-init.sql:ro healthcheck: test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] interval: 5s @@ -29,10 +28,14 @@ services: environment: # Overrides that are always fixed for local dev LOCAL_DEVELOPMENT: "true" - DATABASE_HOST: db - ADMIN_DATABASE_USER: root - ADMIN_DATABASE_PASSWORD: rootpass - ADMIN_DATABASE_SCHEMA: syncbot + DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} + DATABASE_URL: ${DATABASE_URL:-} + DATABASE_HOST: ${DATABASE_HOST:-db} + ADMIN_DATABASE_USER: ${ADMIN_DATABASE_USER:-root} + ADMIN_DATABASE_PASSWORD: ${ADMIN_DATABASE_PASSWORD:-rootpass} + ADMIN_DATABASE_SCHEMA: ${ADMIN_DATABASE_SCHEMA:-syncbot} + DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} + DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} volumes: - ..:/app:cached ports: diff --git a/.env.example b/.env.example index f4a3e62..71eb12c 100644 --- a/.env.example +++ b/.env.example @@ -8,16 +8,24 @@ # For native Python development, source it: source .env or export $(cat .env | xargs) # ----------------------------------------------------------------------------- -# Database +# Database (backend: mysql or sqlite) — pre-release: fresh installs only # ----------------------------------------------------------------------------- +# Option A — MySQL (default): legacy vars or DATABASE_URL +DATABASE_BACKEND=mysql DATABASE_HOST=127.0.0.1 ADMIN_DATABASE_USER=root 
ADMIN_DATABASE_PASSWORD=rootpass ADMIN_DATABASE_SCHEMA=syncbot +# Optional MySQL TLS controls (provider-dependent) +# DATABASE_TLS_ENABLED=true +# DATABASE_SSL_CA_PATH=/etc/pki/tls/certs/ca-bundle.crt -# When set to a Slack Team ID, the "Reset Database" button will be avaialbe -# on the Home tab for that team. Clicking it opens a confirmation modal, -# then drops and reinitializes the DB from db/init.sql. All data is lost. +# Option B — SQLite (forks / local): set backend and URL only +# DATABASE_BACKEND=sqlite +# DATABASE_URL=sqlite:///syncbot.db + +# When set to a Slack Team ID, the "Reset Database" button will be available +# on the Home tab for that team. Clicking it drops and reinitializes the DB. # ENABLE_DB_RESET= # ----------------------------------------------------------------------------- @@ -28,22 +36,22 @@ ADMIN_DATABASE_SCHEMA=syncbot # ----------------------------------------------------------------------------- # Slack -# These are set via SAM template parameters, not .env, during deploy. -# Uncomment if running locally with OAuth flow. +# In cloud deploys these are usually injected by your provider's secret system +# (AWS/GCP/Azure). Uncomment if running locally with OAuth flow. # ----------------------------------------------------------------------------- # SLACK_BOT_TOKEN=xoxb-your-bot-token # SLACK_SIGNING_SECRET=your-signing-secret # ENV_SLACK_CLIENT_ID=your-client-id # ENV_SLACK_CLIENT_SECRET=your-client-secret # ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email -# OAuth state and installation data are stored in the same MySQL database. +# OAuth state and installation data are stored in the same database (MySQL or SQLite). 
# ----------------------------------------------------------------------------- # Encryption (optional) # ----------------------------------------------------------------------------- # Passphrase for Fernet bot-token encryption at rest. # Use any value except "123" to enable encryption. -# PASSWORD_ENCRYPT_KEY=my-secret-passphrase +# TOKEN_ENCRYPTION_KEY=my-secret-passphrase # ----------------------------------------------------------------------------- # Admin Authorization (optional) diff --git a/.github/workflows/sam-pipeline.yml b/.github/workflows/deploy-aws.yml similarity index 79% rename from .github/workflows/sam-pipeline.yml rename to .github/workflows/deploy-aws.yml index e30c716..5a11f2e 100644 --- a/.github/workflows/sam-pipeline.yml +++ b/.github/workflows/deploy-aws.yml @@ -1,11 +1,22 @@ +# Deploy SyncBot to AWS (SAM). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. +# To use GCP instead: set repository variable DEPLOY_TARGET=gcp and disable this workflow. + +name: Deploy (AWS) + on: push: branches: - test - prod +permissions: + id-token: write + contents: read + +# Skip when using GCP (set DEPLOY_TARGET=gcp to use deploy-gcp.yml instead) jobs: sam-build: + if: vars.DEPLOY_TARGET != 'gcp' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -17,11 +28,10 @@ jobs: use-installer: true - uses: aws-actions/configure-aws-credentials@v4 with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} - - run: sam build --use-container + - run: sam build -t infra/aws/template.yaml - name: Publish artifact uses: actions/upload-artifact@v4 @@ -37,9 +47,8 @@ jobs: steps: - uses: aws-actions/configure-aws-credentials@v4 with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 + role-to-assume: ${{ 
vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} - name: Download artifact uses: actions/download-artifact@v4 @@ -71,7 +80,7 @@ jobs: SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ - PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }}" + TokenEncryptionKey=${{ secrets.TOKEN_ENCRYPTION_KEY }}" sam-deploy-prod: if: github.ref == 'refs/heads/prod' @@ -81,9 +90,8 @@ jobs: steps: - uses: aws-actions/configure-aws-credentials@v4 with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} - name: Download artifact uses: actions/download-artifact@v4 @@ -115,4 +123,4 @@ jobs: SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ - PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }}" + TokenEncryptionKey=${{ secrets.TOKEN_ENCRYPTION_KEY }}" diff --git a/.github/workflows/deploy-gcp.yml b/.github/workflows/deploy-gcp.yml new file mode 100644 index 0000000..03e43f8 --- /dev/null +++ b/.github/workflows/deploy-gcp.yml @@ -0,0 +1,69 @@ +# Deploy SyncBot to GCP (Cloud Run). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. +# To use AWS instead, disable this workflow and use deploy-aws.yml. +# +# Setup: +# 1. Run infra/gcp Terraform and configure Workload Identity Federation for GitHub. +# 2. Set GitHub vars: GCP_PROJECT_ID, GCP_REGION, GCP_WORKLOAD_IDENTITY_PROVIDER, GCP_SERVICE_ACCOUNT. +# 3. Set GitHub secrets for Slack/DB/encryption as needed for your CI (or use Secret Manager only). +# 4. Replace placeholder steps with real build/deploy; disable deploy-aws.yml if using GCP only. 
+ +name: Deploy (GCP) + +on: + push: + branches: + - test + - prod + +permissions: + id-token: write + contents: read + +# Run only when DEPLOY_TARGET is set to 'gcp' (disable deploy-aws.yml if using GCP only) +jobs: + build-and-push: + if: vars.DEPLOY_TARGET == 'gcp' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + # Workload Identity Federation: authenticate without a key file + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + + # - uses: google-github-actions/setup-gcloud@v2 + + # Build and push container image to Artifact Registry + # - run: | + # gcloud builds submit --tag "${{ vars.GCP_REGION }}-docker.pkg.dev/${{ vars.GCP_PROJECT_ID }}/syncbot-TEST-images/syncbot:${{ github.sha }}" . + + - name: Placeholder (GCP deploy not yet configured) + run: echo "Configure Workload Identity Federation and uncomment build/deploy steps in deploy-gcp.yml" + + deploy-test: + if: github.ref == 'refs/heads/test' + runs-on: ubuntu-latest + needs: build-and-push + # environment: test + steps: + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + # - run: gcloud run deploy syncbot-test --image=... --region=... + - run: echo "GCP deploy placeholder" + + deploy-prod: + if: github.ref == 'refs/heads/prod' + runs-on: ubuntu-latest + needs: build-and-push + # environment: prod + steps: + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + # - run: gcloud run deploy syncbot-prod --image=... --region=... 
+ - run: echo "GCP deploy placeholder" diff --git a/.gitignore b/.gitignore index 9aaf78d..85bebac 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,8 @@ cover/ local_settings.py db.sqlite3 db.sqlite3-journal +test_syncbot.db +test_bootstrap.db # Flask stuff: instance/ diff --git a/Dockerfile b/Dockerfile index 84faf21..7c06ab1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,18 +2,19 @@ FROM python:3.11-slim WORKDIR /app -# Install system dependencies for cryptography and pillow-heif +# Install system dependencies for cryptography and MySQL client bindings. RUN apt-get update && \ apt-get install -y --no-install-recommends \ build-essential \ libffi-dev \ default-libmysqlclient-dev \ - libheif-dev \ && rm -rf /var/lib/apt/lists/* -# Install Python dependencies -COPY syncbot/requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt +# Export and install runtime Python dependencies from Poetry lockfile. +COPY pyproject.toml poetry.lock ./ +RUN python -m pip install --no-cache-dir --upgrade pip poetry poetry-plugin-export && \ + poetry export --only main --format requirements.txt --without-hashes --output requirements.txt && \ + pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir boto3 # Copy application code diff --git a/README.md b/README.md index d3c2674..74e93b9 100644 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ Before deploying (or developing locally) you need a Slack app: ## Deploying to AWS -SyncBot ships with a full AWS SAM template (`template.yaml`) that provisions everything on the **free tier**: +SyncBot ships with a full AWS SAM template (`infra/aws/template.yaml`) that provisions everything on the **free tier**: | Resource | Service | Free-Tier Detail | |----------|---------|-----------------| @@ -64,7 +64,7 @@ sam deploy --guided You'll be prompted for parameters like `DatabaseUser`, `DatabasePassword`, `SlackSigningSecret`, `SlackClientId`, `SlackClientSecret`, `EncryptionKey`, and `AllowedDBCidr`. 
These are stored as CloudFormation parameters (secrets use `NoEcho`). -3. **Auto-initialize check** — on first startup, SyncBot now creates the database schema and applies pending SQL migrations automatically. No manual `mysql < db/init.sql` step is required. +3. **Auto-initialize check** — on first startup, SyncBot creates the database schema and applies pending Alembic migrations automatically. 4. **Update your Slack app URLs** to point at the API Gateway endpoint shown in the stack outputs (e.g., `https://xxxxx.execute-api.us-east-2.amazonaws.com/Prod/slack/events`). @@ -78,7 +78,7 @@ sam deploy --config-env prod # production profile The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. Each deploy automatically runs DB bootstrap/migrations during app startup. -> For shared infrastructure, CI/CD setup, and advanced deployment options, see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md). +> For one-time bootstrap, **fork-and-deploy** (GitHub OIDC) and **download-and-deploy** (local limited credentials), see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md). --- @@ -109,6 +109,15 @@ Open the project folder in your editor, then: The first build takes a minute or two. After that, your editor is running inside the container with Python, MySQL, and all dependencies ready. +Want SQLite instead of MySQL in the dev container? Set in `.env` before reopening: + +```bash +DATABASE_BACKEND=sqlite +DATABASE_URL=sqlite:////app/syncbot/syncbot.db +``` + +The app will use SQLite and ignore MySQL connection vars. + #### 3. Run the app ```bash @@ -162,6 +171,15 @@ docker compose up --build The app listens on **port 3000**. Code changes require `docker compose restart app`. Only rebuild when `requirements.txt` changes. +**SQLite mode (optional):** set this in `.env` before `docker compose up`: + +```bash +DATABASE_BACKEND=sqlite +DATABASE_URL=sqlite:////app/syncbot/syncbot.db +``` + +This stores the SQLite file inside the bind-mounted app folder. 
You can still leave the `db` service running; the app will ignore MySQL vars when `DATABASE_BACKEND=sqlite`. + ```bash docker compose exec app python -m pytest /app/tests -v # run tests docker compose exec db mysql -u root -prootpass syncbot # database shell @@ -223,7 +241,7 @@ See [`.env.example`](.env.example) for all available options with descriptions. | `ENV_SLACK_CLIENT_ID` | OAuth client ID | | `ENV_SLACK_CLIENT_SECRET` | OAuth client secret | | `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes | -| `PASSWORD_ENCRYPT_KEY` | Passphrase for Fernet bot-token encryption | +| `TOKEN_ENCRYPTION_KEY` | Passphrase for Fernet bot-token encryption | OAuth state and installation data are stored in the same RDS MySQL database. @@ -253,7 +271,7 @@ OAuth state and installation data are stored in the same RDS MySQL database. | [User Guide](docs/USER_GUIDE.md) | End-user walkthrough of all features | | [Architecture](docs/ARCHITECTURE.md) | Message sync flow, AWS infrastructure, caching | | [Backup & Migration](docs/BACKUP_AND_MIGRATION.md) | Full-instance backup/restore, workspace data migration | -| [Deployment](docs/DEPLOYMENT.md) | Shared infrastructure, CI/CD via GitHub Actions | +| [Deployment](docs/DEPLOYMENT.md) | Bootstrap, fork-and-deploy (GitHub OIDC), download-and-deploy | | [API Reference](docs/API_REFERENCE.md) | HTTP endpoints and subscribed Slack events | | [Improvements](docs/IMPROVEMENTS.md) | Completed and planned improvements | @@ -271,11 +289,11 @@ syncbot/ │ ├── federation/ # Cross-instance sync (opt-in) │ ├── db/ # Engine, session, ORM models │ └── slack/ # Action IDs, forms, Block Kit helpers -├── db/init.sql # Baseline schema for fresh databases -├── db/migrations/ # Forward-only SQL migrations (auto-applied) +├── db/alembic/ # Alembic migrations (backend-agnostic schema source) +├── db/alembic.ini # Alembic configuration ├── tests/ # pytest unit tests ├── docs/ # Extended documentation -├── template.yaml # AWS SAM infrastructure-as-code +├── 
infra/aws/ # AWS SAM templates (template.yaml, template.bootstrap.yaml) ├── slack-manifest.yaml # Slack app manifest └── docker-compose.yml # Local development stack ``` diff --git a/db/alembic.ini b/db/alembic.ini new file mode 100644 index 0000000..b688396 --- /dev/null +++ b/db/alembic.ini @@ -0,0 +1,45 @@ +# Alembic config for SyncBot. Run from project root: alembic -c db/alembic.ini upgrade head +# The app runs migrations programmatically via db.initialize_database(). + +[alembic] +script_location = db/alembic +prepend_sys_path = . +version_path_separator = os + +sqlalchemy.url = driver://user:pass@localhost/dbname + +[post_write_hooks] + +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/db/alembic/env.py b/db/alembic/env.py new file mode 100644 index 0000000..8b8ffbc --- /dev/null +++ b/db/alembic/env.py @@ -0,0 +1,61 @@ +"""Alembic env: use SyncBot's engine from db.get_engine(). 
Run from project root with syncbot on PYTHONPATH.""" + +import sys +from pathlib import Path + +# Project root (db/alembic/env.py -> db -> project root) +_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent +if str(_PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(_PROJECT_ROOT)) + +# Load .env when running via CLI (alembic upgrade head) +try: + from dotenv import load_dotenv + load_dotenv(_PROJECT_ROOT / ".env") +except ImportError: + pass + +from logging.config import fileConfig + +from alembic import context +from sqlalchemy import engine_from_config, pool + +from db import get_engine + +config = context.config +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Use SyncBot's engine (from env vars / DATABASE_URL). Do not use sqlalchemy.url from alembic.ini. +target_metadata = None + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = get_engine().url + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + connectable = get_engine() + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=target_metadata, + ) + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/db/alembic/script.py.mako b/db/alembic/script.py.mako new file mode 100644 index 0000000..fbc4b07 --- /dev/null +++ b/db/alembic/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by 
Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/db/alembic/versions/001_baseline.py b/db/alembic/versions/001_baseline.py new file mode 100644 index 0000000..5645140 --- /dev/null +++ b/db/alembic/versions/001_baseline.py @@ -0,0 +1,89 @@ +"""Baseline schema (all app tables + Slack OAuth tables). Supports MySQL and SQLite. + +Revision ID: 001_baseline +Revises: +Create Date: Baseline from ORM models + OAuth tables + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + +from db.schemas import BaseClass + +revision: str = "001_baseline" +down_revision: Union[str, None] = None +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + bind = op.get_bind() + BaseClass.metadata.create_all(bind) + + # Slack SDK OAuth tables (not in our ORM; dialect-neutral schema) + op.create_table( + "slack_bots", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("client_id", sa.String(32), nullable=False), + sa.Column("app_id", sa.String(32), nullable=False), + sa.Column("enterprise_id", sa.String(32), nullable=True), + sa.Column("enterprise_name", sa.String(200), nullable=True), + sa.Column("team_id", sa.String(32), nullable=True), + sa.Column("team_name", sa.String(200), nullable=True), + sa.Column("bot_token", sa.String(200), nullable=True), + sa.Column("bot_id", sa.String(32), nullable=True), + sa.Column("bot_user_id", sa.String(32), nullable=True), + sa.Column("bot_scopes", sa.String(1000), nullable=True), + sa.Column("bot_refresh_token", sa.String(200), nullable=True), + 
sa.Column("bot_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("is_enterprise_install", sa.Boolean(), nullable=False, server_default=sa.false()), + sa.Column("installed_at", sa.DateTime(), nullable=False, server_default=sa.text("CURRENT_TIMESTAMP")), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "slack_installations", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("client_id", sa.String(32), nullable=False), + sa.Column("app_id", sa.String(32), nullable=False), + sa.Column("enterprise_id", sa.String(32), nullable=True), + sa.Column("enterprise_name", sa.String(200), nullable=True), + sa.Column("enterprise_url", sa.String(200), nullable=True), + sa.Column("team_id", sa.String(32), nullable=True), + sa.Column("team_name", sa.String(200), nullable=True), + sa.Column("bot_token", sa.String(200), nullable=True), + sa.Column("bot_id", sa.String(32), nullable=True), + sa.Column("bot_user_id", sa.String(32), nullable=True), + sa.Column("bot_scopes", sa.String(1000), nullable=True), + sa.Column("bot_refresh_token", sa.String(200), nullable=True), + sa.Column("bot_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("user_id", sa.String(32), nullable=False), + sa.Column("user_token", sa.String(200), nullable=True), + sa.Column("user_scopes", sa.String(1000), nullable=True), + sa.Column("user_refresh_token", sa.String(200), nullable=True), + sa.Column("user_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("incoming_webhook_url", sa.String(200), nullable=True), + sa.Column("incoming_webhook_channel", sa.String(200), nullable=True), + sa.Column("incoming_webhook_channel_id", sa.String(200), nullable=True), + sa.Column("incoming_webhook_configuration_url", sa.String(200), nullable=True), + sa.Column("is_enterprise_install", sa.Boolean(), nullable=False, server_default=sa.false()), + sa.Column("token_type", sa.String(32), nullable=True), + sa.Column("installed_at", sa.DateTime(), nullable=False, 
server_default=sa.text("CURRENT_TIMESTAMP")), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "slack_oauth_states", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("state", sa.String(200), nullable=False), + sa.Column("expire_at", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + + +def downgrade() -> None: + op.drop_table("slack_oauth_states") + op.drop_table("slack_installations") + op.drop_table("slack_bots") + bind = op.get_bind() + BaseClass.metadata.drop_all(bind) diff --git a/db/init.sql b/db/init.sql deleted file mode 100644 index dd6555a..0000000 --- a/db/init.sql +++ /dev/null @@ -1,195 +0,0 @@ --- SyncBot Database Schema --- Run this to initialize a fresh database with all tables. --- --- Usage: --- mysql -h -u -p < db/init.sql - -CREATE TABLE IF NOT EXISTS workspaces ( - id INT AUTO_INCREMENT PRIMARY KEY, - team_id VARCHAR(100) UNIQUE NOT NULL, - workspace_name VARCHAR(100), - bot_token VARCHAR(256) NOT NULL, - deleted_at DATETIME DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS instance_keys ( - id INT AUTO_INCREMENT PRIMARY KEY, - public_key TEXT NOT NULL, - private_key_encrypted TEXT NOT NULL, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS federated_workspaces ( - id INT AUTO_INCREMENT PRIMARY KEY, - instance_id VARCHAR(64) NOT NULL UNIQUE, - webhook_url VARCHAR(500) NOT NULL, - public_key TEXT NOT NULL, - status VARCHAR(20) NOT NULL DEFAULT 'active', - name VARCHAR(200) DEFAULT NULL, - primary_team_id VARCHAR(100) DEFAULT NULL, - primary_workspace_name VARCHAR(100) DEFAULT NULL, - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS workspace_groups ( - id INT AUTO_INCREMENT PRIMARY KEY, - name VARCHAR(100) NOT 
NULL, - invite_code VARCHAR(20) NOT NULL UNIQUE, - status VARCHAR(20) NOT NULL DEFAULT 'active', - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - created_by_workspace_id INT NOT NULL, - FOREIGN KEY (created_by_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS workspace_group_members ( - id INT AUTO_INCREMENT PRIMARY KEY, - group_id INT NOT NULL, - workspace_id INT DEFAULT NULL, - federated_workspace_id INT DEFAULT NULL, - status VARCHAR(20) NOT NULL DEFAULT 'active', - role VARCHAR(20) NOT NULL DEFAULT 'member', - joined_at DATETIME DEFAULT NULL, - deleted_at DATETIME DEFAULT NULL, - dm_messages TEXT DEFAULT NULL, - invited_by_slack_user_id VARCHAR(32) DEFAULT NULL, - invited_by_workspace_id INT DEFAULT NULL, - FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, - FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, - FOREIGN KEY (federated_workspace_id) REFERENCES federated_workspaces(id) ON DELETE SET NULL, - FOREIGN KEY (invited_by_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL, - UNIQUE KEY uq_group_workspace (group_id, workspace_id) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS syncs ( - id INT AUTO_INCREMENT PRIMARY KEY, - title VARCHAR(100) NOT NULL, - description VARCHAR(100), - group_id INT DEFAULT NULL, - sync_mode VARCHAR(20) NOT NULL DEFAULT 'group', - target_workspace_id INT DEFAULT NULL, - publisher_workspace_id INT DEFAULT NULL, - FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, - FOREIGN KEY (target_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL, - FOREIGN KEY (publisher_workspace_id) REFERENCES workspaces(id) ON DELETE SET NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS sync_channels ( - id INT AUTO_INCREMENT PRIMARY KEY, - sync_id INT NOT NULL, - workspace_id INT NOT NULL, - channel_id VARCHAR(100) NOT NULL, - 
status VARCHAR(20) NOT NULL DEFAULT 'active', - created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - deleted_at DATETIME DEFAULT NULL, - FOREIGN KEY (sync_id) REFERENCES syncs(id) ON DELETE CASCADE, - FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS post_meta ( - id INT AUTO_INCREMENT PRIMARY KEY, - post_id VARCHAR(100) NOT NULL, - sync_channel_id INT NOT NULL, - ts DECIMAL(16, 6) NOT NULL, - FOREIGN KEY (sync_channel_id) REFERENCES sync_channels(id) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS user_directory ( - id INT AUTO_INCREMENT PRIMARY KEY, - workspace_id INT NOT NULL, - slack_user_id VARCHAR(100) NOT NULL, - email VARCHAR(320) DEFAULT NULL, - real_name VARCHAR(200) DEFAULT NULL, - display_name VARCHAR(200) DEFAULT NULL, - normalized_name VARCHAR(200) DEFAULT NULL, - updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - deleted_at DATETIME DEFAULT NULL, - FOREIGN KEY (workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, - UNIQUE KEY uq_workspace_user (workspace_id, slack_user_id) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS user_mappings ( - id INT AUTO_INCREMENT PRIMARY KEY, - source_workspace_id INT NOT NULL, - source_user_id VARCHAR(100) NOT NULL, - target_workspace_id INT NOT NULL, - target_user_id VARCHAR(100) DEFAULT NULL, - match_method VARCHAR(20) NOT NULL DEFAULT 'none', - source_display_name VARCHAR(200) DEFAULT NULL, - matched_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, - group_id INT DEFAULT NULL, - FOREIGN KEY (source_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, - FOREIGN KEY (target_workspace_id) REFERENCES workspaces(id) ON DELETE CASCADE, - FOREIGN KEY (group_id) REFERENCES workspace_groups(id) ON DELETE CASCADE, - UNIQUE KEY uq_source_target (source_workspace_id, source_user_id, target_workspace_id) -) ENGINE=InnoDB 
DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS slack_bots ( - id INT AUTO_INCREMENT PRIMARY KEY, - client_id VARCHAR(32) NOT NULL, - app_id VARCHAR(32) NOT NULL, - enterprise_id VARCHAR(32) DEFAULT NULL, - enterprise_name VARCHAR(200) DEFAULT NULL, - team_id VARCHAR(32) DEFAULT NULL, - team_name VARCHAR(200) DEFAULT NULL, - bot_token VARCHAR(200) DEFAULT NULL, - bot_id VARCHAR(32) DEFAULT NULL, - bot_user_id VARCHAR(32) DEFAULT NULL, - bot_scopes VARCHAR(1000) DEFAULT NULL, - bot_refresh_token VARCHAR(200) DEFAULT NULL, - bot_token_expires_at DATETIME DEFAULT NULL, - is_enterprise_install BOOLEAN NOT NULL DEFAULT FALSE, - installed_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT EXISTS slack_installations ( - id INT AUTO_INCREMENT PRIMARY KEY, - client_id VARCHAR(32) NOT NULL, - app_id VARCHAR(32) NOT NULL, - enterprise_id VARCHAR(32) DEFAULT NULL, - enterprise_name VARCHAR(200) DEFAULT NULL, - enterprise_url VARCHAR(200) DEFAULT NULL, - team_id VARCHAR(32) DEFAULT NULL, - team_name VARCHAR(200) DEFAULT NULL, - bot_token VARCHAR(200) DEFAULT NULL, - bot_id VARCHAR(32) DEFAULT NULL, - bot_user_id VARCHAR(32) DEFAULT NULL, - bot_scopes VARCHAR(1000) DEFAULT NULL, - bot_refresh_token VARCHAR(200) DEFAULT NULL, - bot_token_expires_at DATETIME DEFAULT NULL, - user_id VARCHAR(32) NOT NULL, - user_token VARCHAR(200) DEFAULT NULL, - user_scopes VARCHAR(1000) DEFAULT NULL, - user_refresh_token VARCHAR(200) DEFAULT NULL, - user_token_expires_at DATETIME DEFAULT NULL, - incoming_webhook_url VARCHAR(200) DEFAULT NULL, - incoming_webhook_channel VARCHAR(200) DEFAULT NULL, - incoming_webhook_channel_id VARCHAR(200) DEFAULT NULL, - incoming_webhook_configuration_url VARCHAR(200) DEFAULT NULL, - is_enterprise_install BOOLEAN NOT NULL DEFAULT FALSE, - token_type VARCHAR(32) DEFAULT NULL, - installed_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE TABLE IF NOT 
EXISTS slack_oauth_states ( - id INT AUTO_INCREMENT PRIMARY KEY, - state VARCHAR(200) NOT NULL, - expire_at DATETIME NOT NULL -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; - -CREATE INDEX idx_sync_channels_channel_id ON sync_channels(channel_id); -CREATE INDEX idx_sync_channels_sync_id ON sync_channels(sync_id); -CREATE INDEX idx_sync_channels_workspace_id ON sync_channels(workspace_id); -CREATE INDEX idx_sync_channels_deleted_at ON sync_channels(deleted_at); -CREATE INDEX idx_post_meta_ts ON post_meta(ts); -CREATE INDEX idx_post_meta_post_id ON post_meta(post_id); -CREATE INDEX idx_workspaces_team_id ON workspaces(team_id); -CREATE INDEX idx_user_dir_email ON user_directory(workspace_id, email); -CREATE INDEX idx_user_dir_normalized ON user_directory(workspace_id, normalized_name); -CREATE INDEX idx_user_mappings_target ON user_mappings(target_workspace_id, match_method); -CREATE INDEX idx_groups_code ON workspace_groups(invite_code, status); -CREATE INDEX idx_group_members_group ON workspace_group_members(group_id, status); -CREATE INDEX idx_group_members_workspace ON workspace_group_members(workspace_id, status); -CREATE INDEX idx_syncs_group ON syncs(group_id); -CREATE INDEX slack_bots_idx ON slack_bots(client_id, enterprise_id, team_id, installed_at); -CREATE INDEX slack_installations_idx ON slack_installations(client_id, enterprise_id, team_id, user_id, installed_at); diff --git a/db/migrations/README.md b/db/migrations/README.md deleted file mode 100644 index 0ba2a4c..0000000 --- a/db/migrations/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Database Migrations - -SyncBot now applies SQL migrations automatically during app startup. - -## How it works - -- `syncbot/db/__init__.py` ensures the DB exists. -- It applies `db/init.sql` once for new databases. -- It records applied versions in `schema_migrations`. -- It applies pending `*.sql` files in this folder in filename sort order. 
- -## Naming convention - -Use lexicographically sortable prefixes: - -- `001_add_new_table.sql` -- `002_add_index_for_lookup.sql` - -Keep migrations: - -- idempotent when practical -- forward-only (never rewrite old files) -- focused (one change objective per file) diff --git a/docker-compose.yml b/docker-compose.yml index b93932d..50294f5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,7 +9,6 @@ services: - "3306:3306" volumes: - syncbot-db:/var/lib/mysql - - ./db/init.sql:/docker-entrypoint-initdb.d/01-init.sql:ro healthcheck: test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] interval: 5s @@ -25,14 +24,18 @@ services: condition: service_healthy environment: # Database - DATABASE_HOST: db - ADMIN_DATABASE_USER: root - ADMIN_DATABASE_PASSWORD: rootpass - ADMIN_DATABASE_SCHEMA: syncbot + DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} + DATABASE_URL: ${DATABASE_URL:-} + DATABASE_HOST: ${DATABASE_HOST:-db} + ADMIN_DATABASE_USER: ${ADMIN_DATABASE_USER:-root} + ADMIN_DATABASE_PASSWORD: ${ADMIN_DATABASE_PASSWORD:-rootpass} + ADMIN_DATABASE_SCHEMA: ${ADMIN_DATABASE_SCHEMA:-syncbot} + DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} + DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} # Slack — replace with your values or use a .env file SLACK_BOT_TOKEN: ${SLACK_BOT_TOKEN:-xoxb-your-bot-token} # Optional - PASSWORD_ENCRYPT_KEY: ${PASSWORD_ENCRYPT_KEY:-123} + TOKEN_ENCRYPTION_KEY: ${TOKEN_ENCRYPTION_KEY:-123} REQUIRE_ADMIN: ${REQUIRE_ADMIN:-true} volumes: - ./syncbot:/app/syncbot diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 1307d54..d32f535 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -112,7 +112,7 @@ flowchart TB Lambda -.->|logs & metrics| Monitoring ``` -All infrastructure is defined in `template.yaml` (AWS SAM). Dashed lines indicate resources that are conditionally created — when `Existing*` parameters are set, those resources are skipped. 
+All infrastructure is defined in `infra/aws/template.yaml` (AWS SAM). Dashed lines indicate resources that are conditionally created — when `Existing*` parameters are set, those resources are skipped. ## Security & Hardening @@ -137,6 +137,6 @@ To keep RDS and Slack API usage low when admins use the **Refresh** button on th ## Backup, Restore, and Data Migration -- **Full-instance backup** — All tables are dumped as plain JSON (no compression). The payload includes `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `PASSWORD_ENCRYPT_KEY`), and `hmac` (HMAC-SHA256 over canonical JSON). Restore inserts rows in FK order; it is intended for an empty or fresh database (e.g. after an AWS rebuild). On HMAC or encryption-key mismatch, the UI warns but allows proceeding. After restore, Home tab caches (`home_tab_hash`, `home_tab_blocks`) are invalidated for all restored workspaces. +- **Full-instance backup** — All tables are dumped as plain JSON (no compression). The payload includes `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `TOKEN_ENCRYPTION_KEY`), and `hmac` (HMAC-SHA256 over canonical JSON). Restore inserts rows in FK order; it is intended for an empty or fresh database (e.g. after an AWS rebuild). On HMAC or encryption-key mismatch, the UI warns but allows proceeding. After restore, Home tab caches (`home_tab_hash`, `home_tab_blocks`) are invalidated for all restored workspaces. - **Data migration (workspace-scoped)** — Export produces a JSON file with syncs, sync channels, post meta, user directory, and user mappings keyed by stable identifiers (team_id, sync title, channel_id). The export can include `source_instance` (webhook_url, instance_id, public_key, one-time connection code) so import on the new instance can establish the federation connection and then import in one step. The payload is signed with the instance Ed25519 key; import verifies the signature and warns (but does not block) on mismatch. 
Import uses replace mode: existing SyncChannels and PostMeta for that workspace in the federated group are removed, then data from the file is created. User mappings are imported where both source and target workspace exist on the new instance. After import, Home tab caches for that workspace are invalidated. - **Instance A detection** — When instance B connects to A via federation, B can send optional `team_id` and `workspace_name` in the pair request. A stores them on the `federated_workspaces` row (`primary_team_id`, `primary_workspace_name`) and, if a local workspace with that `team_id` exists, soft-deletes it so the only representation of that workspace on A is the federated connection. diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md index 625725b..982cd58 100644 --- a/docs/BACKUP_AND_MIGRATION.md +++ b/docs/BACKUP_AND_MIGRATION.md @@ -4,7 +4,7 @@ Use **Backup/Restore** (Home tab, next to Refresh) to: -- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `PASSWORD_ENCRYPT_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. +- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `TOKEN_ENCRYPTION_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. - **Restore from backup** — Paste the backup JSON in the modal and submit. Restore is intended for an **empty or fresh database** (e.g. after an AWS rebuild). 
If the encryption key hash or HMAC does not match, you will see a warning and can still proceed (e.g. if you edited the file on purpose). After restore, Home tab caches are cleared so the next Refresh shows current data. diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index edb4a61..5a61014 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -1,96 +1,210 @@ # Deployment Guide -## Sharing Infrastructure Across Apps +This guide covers deploying SyncBot on **AWS** (default) or **GCP**, with two paths per provider: -If you run multiple apps in the same AWS account, you can point SyncBot at existing resources instead of creating new ones. Every `Existing*` parameter defaults to empty (create new); set it to an existing resource name to reuse it. +- **Fork and deploy** — One-time bootstrap, then all deploys via GitHub Actions (OIDC on AWS, Workload Identity Federation on GCP; no long-lived keys). +- **Download and deploy** — One-time bootstrap, then updates via local CLI (`sam` on AWS, `gcloud`/Terraform on GCP) using limited credentials. -| Parameter | What it skips | -|-----------|---------------| -| `ExistingDatabaseHost` | VPC, subnets, security groups, RDS instance | The app code and [infrastructure contract](INFRA_CONTRACT.md) are provider-agnostic; only the infrastructure in `infra/PROVIDER/` and the CI workflow differ. -OAuth and app data use RDS (MySQL); there are no runtime S3 buckets. Example — deploy with an existing RDS: --- + +## Fork-First Model (Recommended) + +If your goal is "fork and deploy on a different cloud, while still PR'ing app improvements back to SyncBot", use this model: + +1. Keep `syncbot/` provider-neutral and depend only on env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md). +2. Put provider implementation in `infra/PROVIDER/` and `.github/workflows/deploy-PROVIDER.yml`. +3. Keep AWS path as the reference implementation; treat other providers as swappable scaffolds. +4.
Send upstream PRs for provider-neutral changes (DB abstraction, docs contract, tests) and keep fork-only deploy glue isolated. + +This is the intended maintenance path for long-lived forks. + +--- + +## Provider selection + +| Provider | Infra folder | CI workflow | Default | +|----------|--------------|-------------|---------| +| **AWS** | `infra/aws/` | `.github/workflows/deploy-aws.yml` | Yes | +| **GCP** | `infra/gcp/` | `.github/workflows/deploy-gcp.yml` | No (opt-in) | + +- **Use AWS:** Do nothing; the AWS workflow runs on push to `test`/`prod` unless you set `DEPLOY_TARGET=gcp`. +- **Use GCP:** Run `infra/gcp/` Terraform, configure Workload Identity Federation, set repository variable **`DEPLOY_TARGET`** = **`gcp`**, and disable or remove the AWS workflow so only `deploy-gcp.yml` runs. + +See [Swapping providers](#swapping-providers) for changing providers in a fork. + +--- + +## Database backend + +The app supports **MySQL** (default) or **SQLite**. See [INFRA_CONTRACT.md](INFRA_CONTRACT.md) for required variables per backend. **Pre-release:** DB flow assumes **fresh installs only**; schema is created at startup via Alembic. + +- **MySQL:** Use for production and when using AWS/GCP templates (RDS, Cloud SQL). Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or `DATABASE_HOST` + `ADMIN_DATABASE_*`. +- **SQLite:** Use for forks or local runs where you prefer no DB server. Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/syncbot.db`. Single-writer; ensure backups and file durability. AWS/GCP reference templates assume MySQL; for SQLite you deploy the app (e.g. container or Lambda with a writable volume) and set the env vars only. + +--- + +## AWS + +### One-Time Bootstrap (AWS, both paths) + +Deploy the bootstrap stack **once** from your machine with credentials that can create IAM roles, OIDC providers, and S3 buckets. 
+ +**Prerequisites:** AWS CLI, [SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html). For fork-and-deploy: a GitHub repo `owner/repo`. + +From the project root: ```bash -sam deploy --guided \ +aws cloudformation deploy \ + --template-file infra/aws/template.bootstrap.yaml \ + --stack-name syncbot-bootstrap \ --parameter-overrides \ - ExistingDatabaseHost=mydb.xxxx.us-east-2.rds.amazonaws.com + GitHubRepository=YOUR_GITHUB_OWNER/YOUR_REPO \ + --capabilities CAPABILITY_NAMED_IAM \ + --region us-east-2 ``` -Each app sharing the same RDS should use a **different `DatabaseSchema`** (the default is `syncbot`). SyncBot now auto-creates the schema and initializes/updates tables at startup, so there is no manual `init.sql` step. +Replace `YOUR_GITHUB_OWNER/YOUR_REPO` with your repo. Optionally set `CreateOIDCProvider=false` if the account already has the GitHub OIDC provider. + +**Capture outputs:** ```bash -# Optional one-time validation if you want to pre-create schema manually: -mysql -h -u -p -e "CREATE DATABASE IF NOT EXISTS syncbot;" +./infra/aws/scripts/print-bootstrap-outputs.sh ``` -**What about API Gateway and Lambda?** Each stack always creates its own API Gateway and Lambda function. These are lightweight resources that don't affect free-tier billing — the free tier quotas (1M API calls, 1M Lambda requests) are shared across your entire account regardless of how many gateways or functions you have. If you want a unified domain across apps, put a CloudFront distribution or API Gateway custom domain in front. +You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketName** → `AWS_S3_BUCKET`, **BootstrapRegion** → `AWS_REGION`, and suggested stack names for test/prod. -## CI/CD via GitHub Actions +--- -Pushes to deployment branches automatically build and deploy via `.github/workflows/sam-pipeline.yml`: +### Fork and Deploy (AWS, GitHub CI) -1. **Build** — `sam build --use-container` -2. 
**Deploy to test** — automatic on push to `test` -3. **Deploy to prod** — automatic on push to `prod` (can require manual approval via GitHub environment settings) +1. Complete [One-Time Bootstrap (AWS)](#one-time-bootstrap-aws-both-paths). +2. **First app deploy** (with credentials that can create RDS/VPC/Lambda/API Gateway): -`main` can remain an upstream-sync branch and does not deploy. + ```bash + sam build -t infra/aws/template.yaml --use-container + sam deploy --guided \ + --template-file infra/aws/template.yaml \ + --stack-name syncbot-test \ + --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 + ``` -### One-Time Setup + Use the bootstrap **DeploymentBucketName**. Set parameters (Stage, DB, Slack, etc.) when prompted. -1. **Create an IAM user and access key pair** for deployments with permissions for: -``` -AWSCloudFormationFullAccess -AmazonAPIGatewayAdministrator -AWSLambda_FullAccess -AmazonRDSFullAccess -AmazonEC2FullAccess -CloudWatchFullAccess -AmazonS3FullAccess -IAMFullAccess +3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `DATABASE_USER`, `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `DATABASE_PASSWORD`, `TOKEN_ENCRYPTION_KEY`. No access keys — the workflow uses OIDC. +4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). + +--- + +### Download and Deploy (AWS, local) + +1. Run [One-Time Bootstrap (AWS)](#one-time-bootstrap-aws-both-paths) and the [first app deploy](#fork-and-deploy-aws-github-ci) once with admin (or equivalent) credentials. +2. 
**Future deploys** with limited credentials: assume the bootstrap deploy role (recommended): + + ```bash + export AWS_PROFILE=syncbot-deploy # profile with role_arn = GitHubDeployRoleArn + sam build -t infra/aws/template.yaml --use-container + sam deploy \ + -t .aws-sam/build/template.yaml \ + --stack-name syncbot-test \ + --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 + ``` + + Or use a dedicated IAM user with the same policy. See [Deployment Guide (legacy detail)](#sharing-infrastructure-across-apps-aws) for shared RDS and parameter overrides. + +--- + +## GCP + +### One-Time Bootstrap (GCP, both paths) + +From the project root (or `infra/gcp`): + +```bash +cd infra/gcp +terraform init +terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" +terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" ``` -2. **Create a SAM deployment bucket** — SAM uploads the Lambda package to an S3 bucket during deploy (packaging only; the app does not use S3 at runtime): +Set Secret Manager secret values for Slack and DB (see [infra/gcp/README.md](../infra/gcp/README.md)). Set **cloud_run_image** after building and pushing your container image. Capture outputs for CI: **service_url**, **region**, **project_id**, **artifact_registry_repository**, **deploy_service_account_email**. + +Helper script for GitHub vars: ```bash -aws s3 mb s3://my-sam-deploy-bucket --region us-east-2 +./infra/gcp/scripts/print-bootstrap-outputs.sh ``` -3. **Create GitHub Environments** — Go to your repo → **Settings** → **Environments** and create two environments: `test` and `prod`. For `prod`, enable **Required reviewers** so production deploys need manual approval. +--- -4. 
**Add GitHub Secrets** — Under **Settings** → **Secrets and variables** → **Actions**, add these as **environment secrets** for both `test` and `prod`: +### Fork and Deploy (GCP, GitHub CI) -| Secret | Where to find it | -|--------|-----------------| -| `AWS_ACCESS_KEY_ID` | IAM user access key (step 1) | -| `AWS_SECRET_ACCESS_KEY` | IAM user secret key (step 1) | -| `SLACK_SIGNING_SECRET` | Slack app → Basic Information → App Credentials | -| `SLACK_CLIENT_SECRET` | Slack app → Basic Information → App Credentials | -| `DATABASE_PASSWORD` | The RDS master password you chose | -| `PASSWORD_ENCRYPT_KEY` | Any passphrase for bot-token encryption at rest | +1. Complete [One-Time Bootstrap (GCP)](#one-time-bootstrap-gcp-both-paths). +2. Configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub so the repo can impersonate the deploy service account without a key file. +3. In GitHub: set **Variables** (e.g. `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_WORKLOAD_IDENTITY_PROVIDER`, `GCP_SERVICE_ACCOUNT`). Set **DEPLOY_TARGET** = **gcp** so `.github/workflows/deploy-gcp.yml` runs and `deploy-aws.yml` is skipped. +4. Replace the placeholder steps in `deploy-gcp.yml` with real build (e.g. Cloud Build or Docker push to Artifact Registry) and `gcloud run deploy` (or Terraform apply). See `deploy-gcp.yml` comments and [infra/gcp/README.md](../infra/gcp/README.md). +5. Keep those changes inside your fork's infra/workflow files so future upstream rebases remain straightforward. -5. 
**Add GitHub Variables** — Under the same settings page, add these as **environment variables** for each environment: +--- -| Variable | `test` value | `prod` value | -|----------|-------------|-------------| -| `AWS_STACK_NAME` | `syncbot-test` | `syncbot-prod` | -| `AWS_S3_BUCKET` | `my-sam-deploy-bucket` | `my-sam-deploy-bucket` | -| `STAGE_NAME` | `test` | `prod` | -| `EXISTING_DATABASE_HOST` | `mydb.xxxx.us-east-2.rds.amazonaws.com` | `mydb.xxxx.us-east-2.rds.amazonaws.com` | -| `DATABASE_USER` | `syncbot_user` | `syncbot_user` | -| `DATABASE_SCHEMA` | `syncbot_test` | `syncbot_prod` | +### Download and Deploy (GCP, local) -`EXISTING_DATABASE_HOST` tells SAM to skip creating VPC/RDS resources and point Lambda at your existing RDS endpoint. Use different `DATABASE_SCHEMA` values per environment when sharing one RDS instance. -If you want SAM to create a new RDS per environment instead, leave `EXISTING_DATABASE_HOST` empty. +1. Run [One-Time Bootstrap (GCP)](#one-time-bootstrap-gcp-both-paths). +2. Build and push the container image to the Terraform output **artifact_registry_repository**, then update the Cloud Run service: -### Deploy Flow + ```bash + gcloud run deploy syncbot-test --image=REGION-docker.pkg.dev/PROJECT/REPO/syncbot:latest --region=REGION + ``` -Once configured, push to deployment branches and the pipeline runs: + Or run `terraform apply` with an updated `cloud_run_image` variable. -``` -push to test → sam build → deploy to test -push to prod → sam build → (manual approval, optional) → deploy to prod -``` +--- + +## Swapping providers + +To switch from AWS to GCP (or the other way) in a fork: + +1. **Keep app code and [INFRA_CONTRACT.md](INFRA_CONTRACT.md) unchanged.** Only infra and CI are provider-specific. +2. **Disable the old provider:** Remove or disable the workflow for the provider you are leaving (e.g. delete or disable `deploy-aws.yml` when moving to GCP). If using the same repo, set `DEPLOY_TARGET` accordingly. +3. 
**Use the new provider folder:** Run bootstrap for the new provider (`infra/aws/` or `infra/gcp/`) and configure GitHub vars/secrets (and WIF for GCP) as in the sections above. +4. **Point Slack** at the new **service_url** (and run DB migrations or attach an existing DB as required by the contract). + +No changes are needed under `syncbot/` or to the deployment contract; only `infra//` and the chosen workflow change. + +--- + +## Helper scripts + +| Provider | Script | Use | +|----------|--------|-----| +| AWS | `./infra/aws/scripts/print-bootstrap-outputs.sh` | Print bootstrap stack outputs and suggested GitHub variables (run from repo root). | +| GCP | `./infra/gcp/scripts/print-bootstrap-outputs.sh` | Print Terraform outputs and suggested GitHub variables (run from repo root). | + +--- + +## Security summary + +- **Bootstrap** runs once with elevated credentials; it creates a deploy identity (IAM role or GCP service account) and artifact storage (S3 bucket or Artifact Registry). +- **GitHub** uses short-lived federation only: **AWS** OIDC with `AWS_ROLE_TO_ASSUME`; **GCP** Workload Identity Federation with a deploy service account. No long-lived API keys in secrets for deploy. +- **Local** future deploys use assume-role (AWS) or the same deploy service account (GCP) with limited scope. +- **Prod** can be protected with GitHub environment **Required reviewers**. + +--- + +## Database schema (Alembic, fresh install only) + +Schema is managed by **Alembic** (see `db/alembic/`). On startup the app runs **`alembic upgrade head`** only (pre-release: fresh installs only; no stamping of existing DBs). + +- **Fresh installs:** A new database (MySQL or SQLite) gets all tables from the baseline migration at first run. +- **Rollback:** If bootstrap fails, fix the migration issue, reset the DB file/schema, and rerun; no manual downgrade is required for the baseline. + +--- -Monitor progress in your repo's **Actions** tab. 
The first deploy creates the CloudFormation stack (VPC, RDS, Lambda, API Gateway). SAM uses the deployment bucket only for packaging; the app stores OAuth and data in RDS and uploads media directly to Slack. On cold start, SyncBot also applies DB bootstrap/migrations automatically. +## Sharing infrastructure across apps (AWS) -> **Tip:** If you prefer to do the very first deploy manually (to see the interactive prompts), run `sam deploy --guided` locally first, then let the pipeline handle all future deploys. +To use an existing RDS instance instead of creating one per stack, set **ExistingDatabaseHost** in parameter overrides and use a **different DatabaseSchema** per app (e.g. `syncbot_test`, `syncbot_prod`). SyncBot creates the schema and runs migrations on startup. API Gateway and Lambda are per stack; free-tier quotas are account-wide. diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md index b33db6b..3374d9a 100644 --- a/docs/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -99,7 +99,7 @@ This document outlines the improvements made to the SyncBot application and addi - **Connection pooling** reuses DB connections across invocations in warm Lambda containers ### 15. 
Infrastructure as Code -- **AWS SAM template** (`template.yaml`) defining VPC, RDS, Lambda, API Gateway (SAM artifact S3 used for deploy packaging only) +- **AWS SAM template** (`infra/aws/template.yaml`) defining VPC, RDS, Lambda, API Gateway (SAM artifact S3 used for deploy packaging only) - **Free-tier optimized** (128 MB Lambda, db.t3.micro RDS, gp2 storage, no NAT Gateway) - **CI/CD pipeline** (`.github/workflows/sam-pipeline.yml`) for automated build/deploy - **SAM config** (`samconfig.toml`) for staging and production environments @@ -119,7 +119,7 @@ This document outlines the improvements made to the SyncBot application and addi - `request_error` (with `request_type`, `request_id`) - `messages_synced` (with `sync_type`: `new_post`, `thread_reply`, `message_edit`, `message_delete`) - `sync_failures` (with `sync_type`) -- **Added CloudWatch Alarms** in `template.yaml` (within free-tier's 10-alarm limit): +- **Added CloudWatch Alarms** in `infra/aws/template.yaml` (within free-tier's 10-alarm limit): - `LambdaErrorAlarm` — fires on 3+ errors in 5 minutes - `LambdaThrottleAlarm` — fires on any throttling - `LambdaDurationAlarm` — fires when average duration exceeds 10 seconds @@ -417,12 +417,12 @@ This document outlines the improvements made to the SyncBot application and addi ### 45. Backup, Restore, and Data Migration (Completed) - **Slack UI** — Home tab has **Backup/Restore** (next to Refresh) and **Data Migration** (in External Connections when federation is enabled). Modals for download backup, restore from JSON, export workspace data, and import migration file; confirmation modals when HMAC or encryption-key/signature checks fail with option to proceed anyway. -- **Full-instance backup** — All tables exported as JSON with `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `PASSWORD_ENCRYPT_KEY`), and HMAC over canonical JSON. Restore inserts in FK order; intended for empty/fresh DB (e.g. after AWS rebuild). 
On HMAC or encryption-key mismatch, payload stored in cache and confirmation modal pushed; after restore, Home tab caches invalidated for all workspaces. +- **Full-instance backup** — All tables exported as JSON with `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `TOKEN_ENCRYPTION_KEY`), and HMAC over canonical JSON. Restore inserts in FK order; intended for empty/fresh DB (e.g. after AWS rebuild). On HMAC or encryption-key mismatch, payload stored in cache and confirmation modal pushed; after restore, Home tab caches invalidated for all workspaces. - **Workspace migration export/import** — Export produces workspace-scoped JSON (syncs, sync channels, post meta, user directory, user mappings) with optional `source_instance` (webhook_url, instance_id, public_key, one-time connection code). Ed25519 signature for tampering detection. Import verifies signature, resolves or creates federated group (using `source_instance` when present), replace mode (remove then create SyncChannels/PostMeta/user_directory/user_mappings), optional tampering confirmation; Home tab and sync-list caches invalidated after import. - **Instance A detection** — Federated pair request accepts optional `team_id` and `workspace_name`; stored as `primary_team_id` and `primary_workspace_name` on `federated_workspaces`. If a local workspace with that `team_id` exists, it is soft-deleted so the federated connection is the only representation of that workspace on the instance. ### 46. Code Quality & Documentation Restructure (Completed) -- **Database reset via UI** — Renamed `DANGER_DROP_AND_INIT_DB` (auto-drop on startup) to `ENABLE_DB_RESET` (boolean env var). When enabled, a red "Reset Database" button appears in a "Danger Zone" section at the bottom of the Home tab. Clicking it opens a confirmation modal; confirming drops and reinitializes the database from `db/init.sql`, clears all caches, and publishes a confirmation message. No longer runs automatically on startup. 
+- **Database reset via UI** — Renamed `DANGER_DROP_AND_INIT_DB` (auto-drop on startup) to `ENABLE_DB_RESET` (boolean env var). When enabled, a red "Reset Database" button appears in a "Danger Zone" section at the bottom of the Home tab. Clicking it opens a confirmation modal; confirming drops and reinitializes the database via Alembic, clears all caches, and publishes a confirmation message. No longer runs automatically on startup. - **Variable naming convention audit** — Standardized variable names across 14 files to align with the domain model: - `partner` / `p_ws` / `p_ch` / `p_client` → `member_ws` / `sync_channel` / `member_client` (maps to `workspace_group_members` table) - `sc` (SyncChannel) → `sync_channel`; `ch` (ambiguous) → `sync_channel` or `slack_channel` depending on type @@ -439,7 +439,7 @@ This document outlines the improvements made to the SyncBot application and addi - **OAuth in RDS** — Slack OAuth state and installation data are stored in the same MySQL database via `SQLAlchemyInstallationStore` and `SQLAlchemyOAuthStateStore`. One code path for local dev and production; no file-based or S3-backed OAuth stores. - **No runtime S3** — Removed all runtime S3 usage: OAuth buckets and image bucket resources, Lambda S3 policies, and env vars. Media is uploaded directly to each target Slack channel via `files_upload_v2`. SAM deploy still uses an S3 artifact bucket for packaging only. - **HEIC and Pillow removed** — HEIC-to-PNG conversion and `upload_photos` (S3) were removed; direct upload is the only media path. Dropped `pillow` and `pillow-heif` from dependencies. -- **Template and docs** — `template.yaml` no longer creates OAuth or image buckets; README, DEPLOYMENT, ARCHITECTURE, USER_GUIDE, `.env.example`, and IMPROVEMENTS updated to describe MySQL OAuth and artifact-bucket-only S3. 
+- **Template and docs** — `infra/aws/template.yaml` no longer creates OAuth or image buckets; README, DEPLOYMENT, ARCHITECTURE, USER_GUIDE, `.env.example`, and IMPROVEMENTS updated to describe MySQL OAuth and artifact-bucket-only S3. ## Remaining Recommendations @@ -450,8 +450,8 @@ This document outlines the improvements made to the SyncBot application and addi - Review and update other dependencies 2. **Database Migrations** - - Startup now auto-bootstraps schema and applies ordered SQL files from `db/migrations/` (tracked in `schema_migrations`) - - Consider adopting Alembic in the future if you want model-autogenerated migrations and down-revision support + - Startup now bootstraps schema via Alembic (`alembic upgrade head`) for fresh installs. + - Continue using Alembic revisions for schema changes and add DB integration coverage as schema evolves. 3. **Advanced Testing** - Add integration tests for database operations @@ -485,4 +485,4 @@ This document outlines the improvements made to the SyncBot application and addi - Duplicated code has been consolidated into shared helpers throughout handlers and federation modules - Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching keeps builds lightweight, and cross-workspace refreshes use `context=None` to prevent cache contamination - Variable naming follows a consistent domain-model convention: `member_ws`/`member_client` for group members, `sync_channel` for ORM records, `slack_channel` for raw API dicts -- Schema bootstrap + migration application is automatic at startup (`db/init.sql` baseline + `db/migrations/*.sql`) +- Schema bootstrap + migration application is automatic at startup via Alembic (`alembic upgrade head`) diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md new file mode 100644 index 0000000..ec38b88 --- /dev/null +++ b/docs/INFRA_CONTRACT.md @@ -0,0 +1,112 @@ +# 
Infrastructure Contract (Provider-Agnostic) + +This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) must supply so SyncBot runs correctly. Forks can swap provider-specific IaC in `infra//` as long as they satisfy this contract. + +**Pre-release:** This repo is pre-release. Database rollout assumes **fresh installs only** (no legacy schema migration or stamping). New databases are initialized via Alembic `upgrade head` at startup. + +## Runtime Environment Variables + +The application reads configuration from environment variables. Providers must inject these at runtime (e.g. Lambda env, Cloud Run env, or a compatible secret/config layer). + +### Database (backend-agnostic) + +| Variable | Description | +|----------|-------------| +| `DATABASE_BACKEND` | `mysql` (default) or `sqlite`. | +| `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides legacy MySQL vars. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For MySQL, optional (if unset, legacy vars below are used). | +| `DATABASE_HOST` | MySQL hostname (IP or FQDN). Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `ADMIN_DATABASE_USER` | MySQL username. Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `ADMIN_DATABASE_PASSWORD` | MySQL password. Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `ADMIN_DATABASE_SCHEMA` | MySQL database/schema name (e.g. `syncbot`, `syncbot_prod`). Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `DATABASE_TLS_ENABLED` | Optional MySQL TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | +| `DATABASE_SSL_CA_PATH` | Optional CA bundle path used when TLS is enabled (default `/etc/pki/tls/certs/ca-bundle.crt`). | + +**SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. 
Caveats: single-writer behavior, file durability, and backup expectations are your responsibility. For production at scale, prefer MySQL. + +**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or the four legacy vars above. + +### Required in production (non–local) + +| Variable | Description | +|----------|-------------| +| `SLACK_SIGNING_SECRET` | Slack request verification (Basic Information → App Credentials). | +| `ENV_SLACK_CLIENT_ID` | Slack OAuth client ID. | +| `ENV_SLACK_CLIENT_SECRET` | Slack OAuth client secret. | +| `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes (see `.env.example`). | +| `TOKEN_ENCRYPTION_KEY` | Passphrase for bot-token encryption at rest (any value except `123` to enable). | + +### Optional + +| Variable | Description | +|----------|-------------| +| `SLACK_BOT_TOKEN` | Set by OAuth flow; placeholder until first install. | +| `REQUIRE_ADMIN` | `true` (default) or `false`; restricts config to admins/owners. | +| `ENABLE_DB_RESET` | When set to a Slack Team ID, enables the Reset Database button for that workspace. | +| `LOCAL_DEVELOPMENT` | `true` only for local dev; disables token verification and enables dev shortcuts. | +| `LOG_LEVEL` | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default `INFO`). | +| `SOFT_DELETE_RETENTION_DAYS` | Days to retain soft-deleted workspace data (default `30`). | +| `SYNCBOT_FEDERATION_ENABLED` | `true` to enable external connections (federation). | +| `SYNCBOT_INSTANCE_ID` | UUID for this instance (optional; can be auto-generated). | +| `SYNCBOT_PUBLIC_URL` | Public base URL of the app (required when federation is enabled). | + +## Platform Capabilities + +The provider must deliver: + +1. **Public HTTPS endpoint** + Slack sends events and interactivity to a single base URL. 
The app expects: + - `POST /slack/events` — events and actions + - `GET /slack/install` — OAuth start + - `GET /slack/oauth_redirect` — OAuth callback + Any path under `/api/federation` is used for federation when enabled. + +2. **Secret injection** + Slack and DB credentials must be available as environment variables (or equivalent) at process start. No assumption of a specific secret store; provider chooses (e.g. Lambda env, Secret Manager, Parameter Store). + +3. **Database** + **MySQL:** In non–local environments the app uses TLS; the provider must allow outbound TCP to the MySQL host (typically 3306). **SQLite:** No network; the app uses a local file. Single-writer; ensure backups and file durability for production use. + +4. **Keep-warm / scheduled ping (optional but recommended)** + To avoid cold-start latency, the app supports a periodic HTTP GET to a configurable path. The provider should support a scheduled job (e.g. CloudWatch Events, Cloud Scheduler) that hits the service on an interval (e.g. 5 minutes). + +5. **Stateless execution** + The app is stateless; state lives in the configured database (MySQL or SQLite). Horizontal scaling is supported with MySQL as long as all instances share the same DB and env; SQLite is single-writer. + +## CI Auth Model + +- **Preferred:** Short-lived federation (e.g. OIDC for AWS, Workload Identity Federation for GCP). No long-lived API keys in GitHub Secrets for deploy. +- **Bootstrap:** One-time creation of a deploy role (or service account) with least-privilege permissions for deploying the app and its resources. +- **Outputs:** Bootstrap should expose values needed for CI (see below) so users can plug them into GitHub variables. + +## Bootstrap Output Contract + +After running provider-specific bootstrap (e.g. 
AWS CloudFormation bootstrap stack, GCP Terraform), the following outputs should be available so users can configure GitHub Actions and/or local deploy: + +| Output key | Description | Typical use | +|------------|-------------|-------------| +| `deploy_role` | ARN or identifier of the role/identity that CI (or local) uses to deploy | GitHub variable for OIDC/WIF role-to-assume | +| `artifact_bucket` (or equivalent) | Bucket or registry where deploy artifacts (packages, images) are stored | GitHub variable; deploy step uploads here | +| `region` | Primary region for the deployment | GitHub variable (e.g. `AWS_REGION`, `GCP_REGION`) | +| `service_url` | Public base URL of the deployed app (optional at bootstrap; may come from app stack) | For Slack app configuration and docs | + +Provider-specific implementations may use different names (e.g. `GitHubDeployRoleArn`, `DeploymentBucketName`) but should document the mapping to this contract. + +## Swapping Providers + +To use a different cloud or IaC stack: + +1. Keep `syncbot/` and app behavior unchanged. +2. Add or replace contents of `infra/PROVIDER/` with templates/scripts that satisfy the contract above. +3. Point CI (e.g. `.github/workflows/deploy-PROVIDER.yml`) at the new infra paths and provider-specific auth (OIDC, WIF, etc.). +4. Update [DEPLOYMENT.md](DEPLOYMENT.md) (or provider-specific README under `infra/PROVIDER/`) with bootstrap and deploy steps that emit the bootstrap output contract. + +No application code changes are required when swapping infra as long as the runtime environment variables and platform capabilities are met. + +## Fork Compatibility Policy + +To keep forks easy to rebase and upstream contributions easy to merge: + +1. Keep provider-specific changes under `infra/PROVIDER/` and `.github/workflows/deploy-PROVIDER.yml`. +2. Do not couple `syncbot/` application code to a cloud provider (AWS/GCP/Azure-specific SDK calls, metadata assumptions, or IAM wiring). +3.
Treat this file as the source of truth for runtime env contract; if a fork adds infra behavior, map it back to this contract. +4. Upstream PRs should include only provider-neutral app changes unless a provider-specific file is explicitly being updated. diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh new file mode 100755 index 0000000..19a89e0 --- /dev/null +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Print SyncBot AWS bootstrap stack outputs for GitHub variables or local config. +# Run from repo root: infra/aws/scripts/print-bootstrap-outputs.sh +# Optional env: BOOTSTRAP_STACK_NAME (default syncbot-bootstrap), AWS_REGION (default us-east-2). + +set -euo pipefail + +STACK_NAME="${BOOTSTRAP_STACK_NAME:-syncbot-bootstrap}" +REGION="${AWS_REGION:-us-east-2}" + +echo "Bootstrap stack: $STACK_NAME (region: $REGION)" +echo "" + +outputs=$(aws cloudformation describe-stacks \ + --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$REGION" 2>/dev/null) || { + echo "Error: Could not describe stack '$STACK_NAME' in $REGION. Is the bootstrap stack deployed?" >&2 + exit 1 +} + +while read -r key value; do + echo "$key = $value" +done <<< "$outputs" + +echo "" +echo "--- GitHub Actions variables (set these per environment) ---" +echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRoleArn"{print $2}')" +echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}')" +echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml new file mode 100644 index 0000000..89b65f6 --- /dev/null +++ b/infra/aws/template.bootstrap.yaml @@ -0,0 +1,264 @@ +# Bootstrap stack for SyncBot deployments. 
+# Deploy once with admin credentials; creates OIDC role, deploy bucket, and +# least-privilege policy for GitHub Actions and optional local deploy use. +# No SAM Transform — plain CloudFormation (IAM + S3). +AWSTemplateFormatVersion: "2010-09-09" +Description: > + SyncBot bootstrap: GitHub OIDC deploy role, deployment artifact bucket, + and least-privilege policy. Deploy once locally, then use outputs in GitHub + and for future local deploys. + +Parameters: + GitHubRepository: + Type: String + Description: > + GitHub repository in form owner/repo (e.g. myorg/syncbot). + Used to scope OIDC trust so only this repo can assume the deploy role. + CreateOIDCProvider: + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + Description: > + Set to false if an OIDC provider for token.actions.githubusercontent.com + already exists in this account (e.g. created by another stack). + DeploymentBucketPrefix: + Type: String + Default: "syncbot-deploy" + Description: Prefix for the deployment artifact bucket name (account + region will be appended). 
+ +Conditions: + CreateOIDC: !Equals [!Ref CreateOIDCProvider, "true"] + +Resources: + # ------------------------------------------------------------------------- + # GitHub OIDC identity provider (one per account) + # ------------------------------------------------------------------------- + GitHubOIDCProvider: + Type: AWS::IAM::OIDCProvider + Condition: CreateOIDC + Properties: + Url: https://token.actions.githubusercontent.com + ClientIdList: + - sts.amazonaws.com + ThumbprintList: + - 6938fd4d98bab03faadb97b34396831e3780aea1 + + # ------------------------------------------------------------------------- + # Deployment artifact bucket for SAM/CloudFormation packages + # ------------------------------------------------------------------------- + DeploymentBucket: + Type: AWS::S3::Bucket + Properties: + BucketName: !Sub "${DeploymentBucketPrefix}-${AWS::AccountId}-${AWS::Region}" + PublicAccessBlockConfiguration: + BlockPublicAcls: true + BlockPublicPolicy: true + IgnorePublicAcls: true + RestrictPublicBuckets: true + LifecycleConfiguration: + Rules: + - Id: ExpireOldArtifacts + Status: Enabled + ExpirationInDays: 30 + NoncurrentVersionExpiration: + NoncurrentDays: 7 + + # ------------------------------------------------------------------------- + # IAM role for GitHub Actions (and optional local assume-role) + # ------------------------------------------------------------------------- + GitHubDeployRole: + Type: AWS::IAM::Role + Properties: + RoleName: !Sub "syncbot-github-deploy-${AWS::Region}" + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Federated: !If + - CreateOIDC + - !Sub "arn:aws:iam::${AWS::AccountId}:oidc-provider/token.actions.githubusercontent.com" + - !Sub "arn:aws:iam::${AWS::AccountId}:oidc-provider/token.actions.githubusercontent.com" + Action: sts:AssumeRoleWithWebIdentity + Condition: + StringEquals: + token.actions.githubusercontent.com:aud: sts.amazonaws.com + StringLike: + 
token.actions.githubusercontent.com:sub: !Sub "repo:${GitHubRepository}:*"
+
+  # -------------------------------------------------------------------------
+  # Least-privilege policy for SAM deploy (CloudFormation, S3, IAM PassRole, etc.)
+  # -------------------------------------------------------------------------
+  DeployPolicy:
+    Type: AWS::IAM::ManagedPolicy
+    Properties:
+      ManagedPolicyName: !Sub "syncbot-deploy-policy-${AWS::Region}"
+      # Attach this policy directly to the deploy role: CloudFormation has
+      # no standalone role-policy-attachment resource type (that concept
+      # belongs to Terraform, aws_iam_role_policy_attachment), so a separate
+      # attachment resource would fail template validation.
+      Roles:
+        - !Ref GitHubDeployRole
+      PolicyDocument:
+        Version: "2012-10-17"
+        Statement:
+          - Sid: CloudFormation
+            Effect: Allow
+            Action:
+              - cloudformation:CreateStack
+              - cloudformation:UpdateStack
+              - cloudformation:DeleteStack
+              - cloudformation:DescribeStacks
+              - cloudformation:DescribeStackEvents
+              - cloudformation:DescribeStackResources
+              - cloudformation:DescribeStackResource
+              - cloudformation:GetTemplate
+              - cloudformation:GetTemplateSummary
+              - cloudformation:ListStackResources
+              - cloudformation:ValidateTemplate
+            Resource: "*"
+          - Sid: S3DeployBucket
+            Effect: Allow
+            Action:
+              - s3:GetObject
+              - s3:PutObject
+              - s3:DeleteObject
+              - s3:ListBucket
+              - s3:GetBucketLocation
+            Resource:
+              - !GetAtt DeploymentBucket.Arn
+              - !Sub "${DeploymentBucket.Arn}/*"
+          - Sid: IAMCreateSyncBotRole
+            Effect: Allow
+            Action:
+              - iam:CreateRole
+            # iam:RoleName is not a valid IAM condition key; scope creation
+            # to syncbot-* roles via the resource ARN pattern instead.
+            Resource:
+              - !Sub "arn:aws:iam::${AWS::AccountId}:role/syncbot-*"
+          - Sid: IAMManageSyncBotRoles
+            Effect: Allow
+            Action:
+              - iam:PassRole
+              - iam:PutRolePolicy
+              - iam:AttachRolePolicy
+              - iam:GetRole
+              - iam:DeleteRole
+              - iam:DeleteRolePolicy
+              - iam:DetachRolePolicy
+            Resource:
+              - !Sub "arn:aws:iam::${AWS::AccountId}:role/syncbot-*"
+          - Sid: Lambda
+            Effect: Allow
+            Action:
+              - lambda:CreateFunction
+              - lambda:UpdateFunctionCode
+              - lambda:UpdateFunctionConfiguration
+              - lambda:GetFunction
+              - lambda:DeleteFunction
+              - lambda:AddPermission
+              - lambda:RemovePermission
+              - lambda:PublishVersion
+            Resource: !Sub "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:syncbot-*"
+          - Sid: ApiGateway
+            Effect: Allow
+            Action:
+              - apigateway:*
+            Resource: "*"
+          - Sid: Logs
+            Effect: Allow
+            Action:
+              - logs:CreateLogGroup
+              - logs:PutRetentionPolicy
+              - logs:DescribeLogGroups
+              - logs:DeleteLogGroup
+            Resource: !Sub "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/syncbot-*"
+          - Sid: EC2Networking
+            Effect: Allow
+            Action:
+              - ec2:CreateVpc
+              - ec2:DeleteVpc
+              - ec2:DescribeVpcs
+              - ec2:CreateSubnet
+              - ec2:DeleteSubnet
+              - ec2:DescribeSubnets
+              - ec2:CreateInternetGateway
+              - ec2:DeleteInternetGateway
+              - ec2:AttachInternetGateway
+              - ec2:DetachInternetGateway
+              - ec2:CreateRouteTable
+              - ec2:DeleteRouteTable
+              - ec2:CreateRoute
+              - ec2:DeleteRoute
+              - ec2:AssociateRouteTable
+              - ec2:DisassociateRouteTable
+              - ec2:CreateSecurityGroup
+              - ec2:DeleteSecurityGroup
+              - ec2:DescribeSecurityGroups
+              - ec2:DescribeRouteTables
+              - ec2:DescribeInternetGateways
+              - ec2:DescribeVpcAttribute
+              - ec2:ModifyVpcAttribute
+              - ec2:CreateTags
+              - ec2:DeleteTags
+              - ec2:DescribeTags
+            Resource: "*"
+          - Sid: RDS
+            Effect: Allow
+            Action:
+              - rds:CreateDBInstance
+              - rds:DeleteDBInstance
+              - rds:ModifyDBInstance
+              - rds:DescribeDBInstances
+              - rds:CreateDBSubnetGroup
+              - rds:DeleteDBSubnetGroup
+              - rds:DescribeDBSubnetGroups
+              - rds:CreateDBParameterGroup
+              - rds:DeleteDBParameterGroup
+              - rds:DescribeDBParameters
+              - rds:AddTagsToResource
+              - rds:RemoveTagsFromResource
+              - rds:ListTagsForResource
+            Resource: "*"
+          - Sid: CloudWatchAlarms
+            Effect: Allow
+            Action:
+              - cloudwatch:PutMetricAlarm
+              - cloudwatch:DeleteAlarms
+              - cloudwatch:DescribeAlarms
+            Resource: "*"
+          - Sid: Events
+            Effect: Allow
+            Action:
+              - events:PutRule
+              - events:DeleteRule
+              - events:PutTargets
+              - events:RemoveTargets
+              - events:DescribeRule
+              - events:ListTargetsByRule
+            Resource: "*"
+
+Outputs:
+  GitHubDeployRoleArn:
+    Description: ARN of the role for
GitHub Actions to assume (set as AWS_ROLE_TO_ASSUME). + Value: !GetAtt GitHubDeployRole.Arn + Export: + Name: !Sub "${AWS::StackName}-GitHubDeployRoleArn" + DeploymentBucketName: + Description: Name of the S3 bucket for SAM deploy artifacts (set as AWS_S3_BUCKET). + Value: !Ref DeploymentBucket + Export: + Name: !Sub "${AWS::StackName}-DeploymentBucketName" + BootstrapRegion: + Description: Region where bootstrap and app stacks are deployed. + Value: !Ref AWS::Region + Export: + Name: !Sub "${AWS::StackName}-BootstrapRegion" + SuggestedTestStackName: + Description: Suggested stack name for test environment. + Value: syncbot-test + SuggestedProdStackName: + Description: Suggested stack name for prod environment. + Value: syncbot-prod diff --git a/template.yaml b/infra/aws/template.yaml similarity index 88% rename from template.yaml rename to infra/aws/template.yaml index fe61152..ca125da 100644 --- a/template.yaml +++ b/infra/aws/template.yaml @@ -5,6 +5,7 @@ Description: > Free-tier compatible: Lambda, API Gateway, RDS db.t3.micro. OAuth and app data use RDS (MySQL); media is uploaded directly to Slack. SAM deploy uses an S3 artifact bucket for packaging only (not runtime). + Template lives under infra/aws; CodeUri points at repo-root syncbot/. 
Globals: Function: @@ -27,9 +28,9 @@ Parameters: Stage: Description: Deployment stage Type: String - Default: staging + Default: test AllowedValues: - - staging + - test - prod # --- Slack --- @@ -103,8 +104,8 @@ Parameters: # --- Security --- - PasswordEncryptKey: - Description: Encryption key for sensitive data + TokenEncryptionKey: + Description: Encryption key for bot and OAuth tokens Type: String NoEcho: true Default: "123" @@ -128,7 +129,7 @@ Conditions: Mappings: StagesMap: - staging: + test: SlackClientID: "1966318390773.6037875913205" KeepWarmName: "SyncBotKeepWarmTest" prod: @@ -138,13 +139,6 @@ Mappings: Resources: # ============================================================ # Networking - # - # Minimal VPC for RDS (AWS requires RDS to live in a VPC). - # Only public subnets — no NAT Gateway, no private subnets. - # Lambda runs OUTSIDE the VPC and connects to RDS over its - # public endpoint, keeping the architecture free-tier friendly. - # - # Skipped entirely when ExistingDatabaseHost is provided. # ============================================================ VPC: @@ -229,10 +223,6 @@ Resources: SubnetId: !Ref PublicSubnet2 RouteTableId: !Ref PublicRouteTable - # ============================================================ - # Security Groups (skipped when using existing database) - # ============================================================ - RDSSecurityGroup: Type: AWS::EC2::SecurityGroup Condition: CreateDatabase @@ -250,9 +240,7 @@ Resources: Value: !Sub "syncbot-${Stage}-rds-sg" # ============================================================ - # RDS MySQL Database (free-tier: db.t3.micro, 20 GB gp2) - # - # Skipped entirely when ExistingDatabaseHost is provided. 
+ # RDS MySQL Database # ============================================================ RDSParameterGroup: @@ -310,20 +298,13 @@ Resources: Value: !Sub "syncbot-${Stage}-db" # ============================================================ - # Lambda Function (free-tier: 1M requests, 400 000 GB-s) - # - # Runs OUTSIDE the VPC so it can reach the Slack API and RDS - # public endpoint without a NAT Gateway. - # - # Each app gets its own Lambda function — this is inherent to - # the serverless model and doesn't affect free-tier billing - # (the 1M request quota is shared across ALL functions). + # Lambda Function # ============================================================ SyncBotFunction: Type: AWS::Serverless::Function Properties: - CodeUri: syncbot/ + CodeUri: ../../syncbot/ Handler: app.handler Runtime: python3.11 Architectures: @@ -371,15 +352,11 @@ Resources: ADMIN_DATABASE_USER: !Ref DatabaseUser ADMIN_DATABASE_PASSWORD: !Ref DatabasePassword ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema - PASSWORD_ENCRYPT_KEY: !Ref PasswordEncryptKey + TOKEN_ENCRYPTION_KEY: !Ref TokenEncryptionKey REQUIRE_ADMIN: !Ref RequireAdmin # ============================================================ - # CloudWatch Alarms (free-tier: 10 alarms) - # - # Basic operational alarms that surface problems before users - # notice. All alarms use the free-tier standard-resolution - # (60-second) metrics already published by Lambda and API GW. + # CloudWatch Alarms # ============================================================ LambdaErrorAlarm: @@ -406,8 +383,7 @@ Resources: Properties: AlarmName: !Sub "syncbot-${Stage}-lambda-throttles" AlarmDescription: > - Fires when the SyncBot Lambda function is throttled, - indicating a concurrency limit has been reached. + Fires when the SyncBot Lambda function is throttled. 
Namespace: AWS/Lambda MetricName: Throttles Dimensions: @@ -425,8 +401,7 @@ Resources: Properties: AlarmName: !Sub "syncbot-${Stage}-lambda-duration" AlarmDescription: > - Fires when average Lambda duration exceeds 10 seconds, - indicating potential performance degradation. + Fires when average Lambda duration exceeds 10 seconds. Namespace: AWS/Lambda MetricName: Duration Dimensions: diff --git a/infra/gcp/README.md b/infra/gcp/README.md new file mode 100644 index 0000000..7fd3736 --- /dev/null +++ b/infra/gcp/README.md @@ -0,0 +1,72 @@ +# SyncBot on GCP (Terraform) + +Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infrastructure contract](../../docs/INFRA_CONTRACT.md): Cloud Run (public HTTPS), Secret Manager, optional Cloud SQL, and optional Cloud Scheduler keep-warm. + +## Prerequisites + +- [Terraform](https://www.terraform.io/downloads) >= 1.0 +- [gcloud](https://cloud.google.com/sdk/docs/install) CLI, authenticated +- A GCP project with billing enabled + +## Quick start + +1. **Enable APIs and create secrets (one-time)** + Terraform will enable required APIs. Create Secret Manager secrets and set their values (or let Terraform create placeholder secrets and add versions manually): + + ```bash + cd infra/gcp + terraform init + terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" + terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" + ``` + +2. **Set secret values** + After the first apply, add secret versions for Slack and DB (if using existing DB). Use the secret IDs shown in Terraform (e.g. `syncbot-test-syncbot-slack-signing-secret`): + + ```bash + echo -n "YOUR_SLACK_SIGNING_SECRET" | gcloud secrets versions add syncbot-test-syncbot-slack-signing-secret --data-file=- + # Repeat for ENV_SLACK_CLIENT_ID, ENV_SLACK_CLIENT_SECRET, ENV_SLACK_SCOPES, syncbot-token-encryption-key, syncbot-db-password (if existing DB) + ``` + +3. **Set the Cloud Run image** + By default the service uses a placeholder image. 
Build and push your SyncBot image to Artifact Registry, then: + + ```bash + terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" \ + -var='cloud_run_image=REGION-docker.pkg.dev/PROJECT/syncbot-test-images/syncbot:latest' + ``` + +## Variables (summary) + +| Variable | Description | +|----------|-------------| +| `project_id` | GCP project ID (required) | +| `region` | Region for Cloud Run and optional Cloud SQL (default `us-central1`) | +| `stage` | Stage name, e.g. `test` or `prod` | +| `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | +| `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | +| `cloud_run_image` | Container image URL for Cloud Run (set after first build) | +| `enable_keep_warm` | Create Cloud Scheduler job to ping the service (default `true`) | + +See [variables.tf](variables.tf) for all options. + +## Outputs (deploy contract) + +After `terraform apply`, outputs align with [docs/INFRA_CONTRACT.md](../../docs/INFRA_CONTRACT.md): + +- **service_url** — Public base URL (for Slack app configuration) +- **region** — Primary region +- **project_id** — GCP project ID +- **artifact_registry_repository** — Image registry URL (CI pushes here) +- **deploy_service_account_email** — Service account for CI (use with Workload Identity Federation) + +Use the [GCP bootstrap output script](scripts/print-bootstrap-outputs.sh) to print these as GitHub variable suggestions. + +## Keep-warm + +If `enable_keep_warm` is `true`, a Cloud Scheduler job pings the service at `/health` on the configured interval. Ensure your app exposes a `/health` endpoint or change the job target in [main.tf](main.tf) to another path (e.g. `/`). + +## Security + +- The Cloud Run service is publicly invokable so Slack can reach it. For production, consider Cloud Armor or IAP. 
+- Deploy uses a dedicated service account; prefer [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub Actions instead of long-lived keys. diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf new file mode 100644 index 0000000..71f531b --- /dev/null +++ b/infra/gcp/main.tf @@ -0,0 +1,319 @@ +# SyncBot on GCP — minimal Terraform scaffold +# Satisfies docs/INFRA_CONTRACT.md (Cloud Run, secrets, optional Cloud SQL, keep-warm) + +terraform { + required_version = ">= 1.0" + required_providers { + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region +} + +locals { + name_prefix = "syncbot-${var.stage}" + secret_ids = [ + var.secret_slack_signing_secret, + var.secret_slack_client_id, + var.secret_slack_client_secret, + var.secret_slack_scopes, + var.secret_token_encryption_key, + var.secret_db_password, + ] + # Map deploy-contract env var names to Secret Manager secret variable keys (used in app_secrets) + env_to_secret_key = { + "SLACK_SIGNING_SECRET" = var.secret_slack_signing_secret + "ENV_SLACK_CLIENT_ID" = var.secret_slack_client_id + "ENV_SLACK_CLIENT_SECRET" = var.secret_slack_client_secret + "ENV_SLACK_SCOPES" = var.secret_slack_scopes + "TOKEN_ENCRYPTION_KEY" = var.secret_token_encryption_key + "ADMIN_DATABASE_PASSWORD" = var.secret_db_password + } +} + +# --------------------------------------------------------------------------- +# APIs +# --------------------------------------------------------------------------- + +resource "google_project_service" "run" { + project = var.project_id + service = "run.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "secretmanager" { + project = var.project_id + service = "secretmanager.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "sqladmin" { + 
count = var.use_existing_database ? 0 : 1 + project = var.project_id + service = "sqladmin.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "scheduler" { + count = var.enable_keep_warm ? 1 : 0 + project = var.project_id + service = "cloudscheduler.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "artifact_registry" { + project = var.project_id + service = "artifactregistry.googleapis.com" + disable_on_destroy = false +} + +# --------------------------------------------------------------------------- +# Secret Manager: placeholder secrets (values set via gcloud or console) +# --------------------------------------------------------------------------- + +resource "google_secret_manager_secret" "app_secrets" { + for_each = toset(local.secret_ids) + project = var.project_id + secret_id = "${local.name_prefix}-${each.key}" + + replication { + auto {} + } + + depends_on = [google_project_service.secretmanager] +} + +# --------------------------------------------------------------------------- +# Artifact Registry repository for container images (deploy contract: artifact_bucket equivalent) +# --------------------------------------------------------------------------- + +resource "google_artifact_registry_repository" "syncbot" { + location = var.region + repository_id = "${local.name_prefix}-images" + description = "SyncBot container images" + format = "DOCKER" + + depends_on = [google_project_service.artifact_registry] +} + +# --------------------------------------------------------------------------- +# Service account for Cloud Run (runtime) +# --------------------------------------------------------------------------- + +resource "google_service_account" "cloud_run" { + project = var.project_id + account_id = "${replace(local.name_prefix, "-", "")}-run" + display_name = "SyncBot Cloud Run runtime (${var.stage})" +} + +# Grant Cloud Run SA access to read the app secrets +resource 
"google_project_iam_member" "cloud_run_secret_access" { + for_each = toset(local.secret_ids) + project = var.project_id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${google_service_account.cloud_run.email}" +} + +# --------------------------------------------------------------------------- +# Deploy service account (CI / Workload Identity Federation) +# --------------------------------------------------------------------------- + +resource "google_service_account" "deploy" { + project = var.project_id + account_id = "${replace(local.name_prefix, "-", "")}-deploy" + display_name = "SyncBot deploy (CI) (${var.stage})" +} + +resource "google_project_iam_member" "deploy_run_admin" { + project = var.project_id + role = "roles/run.admin" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +resource "google_project_iam_member" "deploy_sa_user" { + project = var.project_id + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +resource "google_project_iam_member" "deploy_artifact_writer" { + project = var.project_id + role = "roles/artifactregistry.writer" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +# --------------------------------------------------------------------------- +# Cloud SQL (optional): minimal MySQL instance +# --------------------------------------------------------------------------- + +resource "random_password" "db" { + count = var.use_existing_database ? 0 : 1 + length = 24 + special = false +} + +resource "google_sql_database_instance" "main" { + count = var.use_existing_database ? 
0 : 1 + project = var.project_id + name = "${local.name_prefix}-db" + database_version = "MYSQL_8_0" + region = var.region + + settings { + tier = "db-f1-micro" + availability_type = "ZONAL" + disk_size = 10 + disk_type = "PD_SSD" + + database_flags { + name = "cloudsql_iam_authentication" + value = "on" + } + + ip_configuration { + ipv4_enabled = true + private_network = null + } + } + + deletion_protection = false + + depends_on = [google_project_service.sqladmin] +} + +resource "google_sql_database" "schema" { + count = var.use_existing_database ? 0 : 1 + name = "syncbot" + instance = google_sql_database_instance.main[0].name +} + +resource "google_sql_user" "app" { + count = var.use_existing_database ? 0 : 1 + name = "syncbot_app" + instance = google_sql_database_instance.main[0].name + host = "%" + password = random_password.db[0].result +} + +# Store Cloud SQL password in Secret Manager for Cloud Run +resource "google_secret_manager_secret_version" "db_password" { + count = var.use_existing_database ? 0 : 1 + secret = google_secret_manager_secret.app_secrets[var.secret_db_password].id + secret_data = random_password.db[0].result +} + +# --------------------------------------------------------------------------- +# Cloud Run service +# --------------------------------------------------------------------------- + +locals { + db_host = var.use_existing_database ? var.existing_db_host : (length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "") + db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" + db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" + # Image: use variable or a placeholder until first deploy + image = var.cloud_run_image != "" ? 
var.cloud_run_image : "us-docker.pkg.dev/cloudrun/container/hello" +} + +resource "google_cloud_run_v2_service" "syncbot" { + project = var.project_id + name = local.name_prefix + location = var.region + ingress = "INGRESS_TRAFFIC_ALL" + + template { + service_account = google_service_account.cloud_run.email + + scaling { + min_instance_count = var.cloud_run_min_instances + max_instance_count = var.cloud_run_max_instances + } + + containers { + image = local.image + + resources { + limits = { + cpu = var.cloud_run_cpu + memory = var.cloud_run_memory + } + } + + env { + name = "DATABASE_HOST" + value = local.db_host + } + env { + name = "ADMIN_DATABASE_USER" + value = local.db_user + } + env { + name = "ADMIN_DATABASE_SCHEMA" + value = local.db_schema + } + + dynamic "env" { + for_each = local.env_to_secret_key + content { + name = env.key + value_source { + secret_key_ref { + secret = google_secret_manager_secret.app_secrets[env.value].name + version = "latest" + } + } + } + } + } + } + + depends_on = [ + google_project_service.run, + google_secret_manager_secret.app_secrets, + ] +} + +# Allow unauthenticated invocations (Slack calls the URL; use IAP or Cloud Armor in prod if needed) +resource "google_cloud_run_v2_service_iam_member" "public" { + project = google_cloud_run_v2_service.syncbot.project + location = google_cloud_run_v2_service.syncbot.location + name = google_cloud_run_v2_service.syncbot.name + role = "roles/run.invoker" + member = "allUsers" +} + +# --------------------------------------------------------------------------- +# Cloud Scheduler (keep-warm) +# --------------------------------------------------------------------------- + +resource "google_cloud_scheduler_job" "keep_warm" { + count = var.enable_keep_warm ? 
1 : 0 + project = var.project_id + name = "${local.name_prefix}-keep-warm" + region = var.region + schedule = "*/${var.keep_warm_interval_minutes} * * * *" + time_zone = "UTC" + attempt_deadline = "60s" + + http_target { + uri = "${google_cloud_run_v2_service.syncbot.uri}/health" + http_method = "GET" + oidc_token { + service_account_email = google_service_account.cloud_run.email + } + } + + depends_on = [ + google_project_service.scheduler, + google_cloud_run_v2_service.syncbot, + ] +} diff --git a/infra/gcp/outputs.tf b/infra/gcp/outputs.tf new file mode 100644 index 0000000..b5b3516 --- /dev/null +++ b/infra/gcp/outputs.tf @@ -0,0 +1,44 @@ +# Outputs aligned with docs/INFRA_CONTRACT.md (bootstrap output contract) + +output "service_url" { + description = "Public base URL of the deployed app (for Slack app configuration)" + value = google_cloud_run_v2_service.syncbot.uri +} + +output "region" { + description = "Primary region for the deployment" + value = var.region +} + +output "project_id" { + description = "GCP project ID" + value = var.project_id +} + +# Deploy contract: artifact_bucket equivalent (registry for container images) +output "artifact_registry_repository" { + description = "Artifact Registry repository for container images (CI pushes here)" + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.syncbot.repository_id}" +} + +# Deploy contract: deploy_role equivalent (for Workload Identity Federation) +output "deploy_service_account_email" { + description = "Service account email for CI/deploy (use with WIF)" + value = google_service_account.deploy.email +} + +output "cloud_run_service_name" { + description = "Cloud Run service name (for deploy targeting)" + value = google_cloud_run_v2_service.syncbot.name +} + +output "cloud_run_service_location" { + description = "Cloud Run service location (region)" + value = google_cloud_run_v2_service.syncbot.location +} + +# Optional: DB connection info when Cloud SQL 
is created +output "database_connection_name" { + description = "Cloud SQL connection name (when not using existing DB)" + value = var.use_existing_database ? null : (length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].connection_name : null) +} diff --git a/infra/gcp/scripts/print-bootstrap-outputs.sh b/infra/gcp/scripts/print-bootstrap-outputs.sh new file mode 100755 index 0000000..93164f2 --- /dev/null +++ b/infra/gcp/scripts/print-bootstrap-outputs.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Print SyncBot GCP Terraform outputs for GitHub variables (WIF, deploy). +# Run from repo root: infra/gcp/scripts/print-bootstrap-outputs.sh +# Requires: terraform in PATH; run from repo root so infra/gcp is available. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GCP_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" + +if [[ ! -d "$GCP_DIR" ]] || [[ ! -f "$GCP_DIR/main.tf" ]]; then + echo "Error: infra/gcp not found (expected at $GCP_DIR). Run from repo root." >&2 + exit 1 +fi + +echo "GCP Terraform outputs (infra/gcp)" +echo "" + +cd "$GCP_DIR" +if ! terraform output -json >/dev/null 2>&1; then + echo "Error: Terraform state not initialized or no outputs. Run 'terraform init' and 'terraform apply' in infra/gcp first." >&2 + exit 1 +fi + +terraform output + +echo "" +echo "--- GitHub Actions variables (suggested) ---" +echo "GCP_PROJECT_ID = $(terraform output -raw project_id 2>/dev/null || echo '')" +echo "GCP_REGION = $(terraform output -raw region 2>/dev/null || echo '')" +echo "GCP_SERVICE_ACCOUNT = $(terraform output -raw deploy_service_account_email 2>/dev/null || echo '')" +echo "Artifact Registry = $(terraform output -raw artifact_registry_repository 2>/dev/null || echo '')" +echo "Service URL = $(terraform output -raw service_url 2>/dev/null || echo '')" +echo "" +echo "For deploy-gcp.yml also set: GCP_WORKLOAD_IDENTITY_PROVIDER (after configuring WIF for GitHub)." 
diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf new file mode 100644 index 0000000..c0be008 --- /dev/null +++ b/infra/gcp/variables.tf @@ -0,0 +1,136 @@ +# GCP Terraform variables for SyncBot (see docs/INFRA_CONTRACT.md) + +variable "project_id" { + type = string + description = "GCP project ID" +} + +variable "region" { + type = string + default = "us-central1" + description = "Primary region for Cloud Run and optional Cloud SQL" +} + +variable "stage" { + type = string + default = "test" + description = "Stage name (e.g. test, prod); used for resource naming" +} + +# --------------------------------------------------------------------------- +# Database: use existing or create Cloud SQL +# --------------------------------------------------------------------------- + +variable "use_existing_database" { + type = bool + default = false + description = "If true, do not create Cloud SQL; app uses existing_db_host/schema/user/password" +} + +variable "existing_db_host" { + type = string + default = "" + description = "Existing MySQL host (required when use_existing_database = true)" +} + +variable "existing_db_schema" { + type = string + default = "syncbot" + description = "Existing MySQL schema name (when use_existing_database = true)" +} + +variable "existing_db_user" { + type = string + default = "" + description = "Existing MySQL user (when use_existing_database = true)" +} + +# --------------------------------------------------------------------------- +# Cloud Run +# --------------------------------------------------------------------------- + +variable "cloud_run_image" { + type = string + default = "" + description = "Container image URL for Cloud Run (e.g. gcr.io/PROJECT/syncbot:latest). Set after first build or by CI." 
+} + +variable "cloud_run_cpu" { + type = string + default = "1" + description = "CPU allocation for Cloud Run service" +} + +variable "cloud_run_memory" { + type = string + default = "512Mi" + description = "Memory allocation for Cloud Run service" +} + +variable "cloud_run_min_instances" { + type = number + default = 0 + description = "Minimum number of instances (0 allows scale-to-zero)" +} + +variable "cloud_run_max_instances" { + type = number + default = 10 + description = "Maximum number of Cloud Run instances" +} + +# --------------------------------------------------------------------------- +# Keep-warm (Cloud Scheduler) +# --------------------------------------------------------------------------- + +variable "enable_keep_warm" { + type = bool + default = true + description = "Create a Cloud Scheduler job that pings the service periodically" +} + +variable "keep_warm_interval_minutes" { + type = number + default = 5 + description = "Interval in minutes for keep-warm ping" +} + +# --------------------------------------------------------------------------- +# Secrets: names only; values are set outside Terraform (gcloud or console) +# --------------------------------------------------------------------------- + +variable "secret_slack_signing_secret" { + type = string + default = "syncbot-slack-signing-secret" + description = "Secret Manager secret ID for SLACK_SIGNING_SECRET" +} + +variable "secret_slack_client_id" { + type = string + default = "syncbot-slack-client-id" + description = "Secret Manager secret ID for ENV_SLACK_CLIENT_ID" +} + +variable "secret_slack_client_secret" { + type = string + default = "syncbot-slack-client-secret" + description = "Secret Manager secret ID for ENV_SLACK_CLIENT_SECRET" +} + +variable "secret_slack_scopes" { + type = string + default = "syncbot-slack-scopes" + description = "Secret Manager secret ID for ENV_SLACK_SCOPES" +} + +variable "secret_token_encryption_key" { + type = string + default = 
"syncbot-token-encryption-key" + description = "Secret Manager secret ID for TOKEN_ENCRYPTION_KEY" +} + +variable "secret_db_password" { + type = string + default = "syncbot-db-password" + description = "Secret Manager secret ID for ADMIN_DATABASE_PASSWORD (used when use_existing_database = true or with Cloud SQL)" +} diff --git a/poetry.lock b/poetry.lock index fa8e085..766bc95 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,25 @@ # This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +[[package]] +name = "alembic" +version = "1.18.4" +description = "A database migration tool for SQLAlchemy." +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "alembic-1.18.4-py3-none-any.whl", hash = "sha256:a5ed4adcf6d8a4cb575f3d759f071b03cd6e5c7618eb796cb52497be25bfe19a"}, + {file = "alembic-1.18.4.tar.gz", hash = "sha256:cb6e1fd84b6174ab8dbb2329f86d631ba9559dd78df550b57804d607672cedbc"}, +] + +[package.dependencies] +Mako = "*" +SQLAlchemy = ">=1.4.23" +typing-extensions = ">=4.12" + +[package.extras] +tz = ["tzdata"] + [[package]] name = "boto3" version = "1.28.60" @@ -449,186 +469,136 @@ files = [ ] [[package]] -name = "packaging" -version = "23.2" -description = "Core utilities for Python packages" +name = "mako" +version = "1.3.10" +description = "A super-fast templating language that borrows the best ideas from the existing templating languages." 
optional = false -python-versions = ">=3.7" -groups = ["dev"] +python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"}, + {file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"}, ] +[package.dependencies] +MarkupSafe = ">=0.9.2" + +[package.extras] +babel = ["Babel"] +lingua = ["lingua"] +testing = ["pytest"] + [[package]] -name = "pillow" -version = "12.1.1" -description = "Python Imaging Library (fork)" +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." optional = false -python-versions = ">=3.10" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0"}, - {file = "pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713"}, - {file = "pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b"}, - {file = "pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b"}, - {file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4"}, - {file = "pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4"}, - {file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e"}, - {file = "pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff"}, - {file = "pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40"}, - {file = "pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23"}, - {file = "pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9"}, - {file = "pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32"}, - {file = "pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38"}, - {file = "pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5"}, - {file = "pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090"}, - {file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af"}, - {file = "pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b"}, - {file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5"}, - {file = "pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d"}, - {file = "pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c"}, - {file = "pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563"}, - {file = "pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80"}, - {file = "pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052"}, - {file = "pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984"}, - {file = "pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79"}, - {file = "pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293"}, - {file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397"}, - {file = "pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0"}, - {file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3"}, - {file = "pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35"}, - {file = "pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a"}, - {file = "pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6"}, - {file = "pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523"}, - {file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e"}, - {file = "pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9"}, - {file = "pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6"}, - {file = "pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60"}, - {file = "pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2"}, - {file = "pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850"}, - {file = "pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289"}, - {file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e"}, - {file = "pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717"}, - {file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a"}, - {file = "pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029"}, - {file = "pillow-12.1.1-cp313-cp313-win32.whl", hash = 
"sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b"}, - {file = "pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1"}, - {file = "pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a"}, - {file = "pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da"}, - {file = "pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc"}, - {file = "pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c"}, - {file = "pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8"}, - {file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20"}, - {file = "pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13"}, - {file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf"}, - {file = "pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524"}, - {file = "pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986"}, - {file = "pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c"}, - {file = "pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = 
"sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3"}, - {file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af"}, - {file = "pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f"}, - {file = "pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642"}, - {file = "pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd"}, - {file = "pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202"}, - {file = "pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f"}, - {file = "pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f"}, - {file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f"}, - {file = "pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e"}, - {file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0"}, - {file = "pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb"}, - {file = "pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f"}, - {file = "pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = 
"sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15"}, - {file = "pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f"}, - {file = "pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8"}, - {file = "pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9"}, - {file = "pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60"}, - {file = "pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7"}, - {file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f"}, - {file = "pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586"}, - {file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce"}, - {file = "pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8"}, - {file = "pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36"}, - {file = "pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b"}, - {file = "pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735"}, - {file = "pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e"}, - {file = "pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = 
"markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash 
= "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = 
"markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] -[package.extras] -docs = ["furo", "olefile", "sphinx (>=8.2)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"] -fpx = ["olefile"] -mic = ["olefile"] -test-arrow = ["arro3-compute", "arro3-core", "nanoarrow", "pyarrow"] -tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma (>=5)", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "trove-classifiers (>=2024.10.12)"] -xmp = ["defusedxml"] - [[package]] -name = "pillow-heif" -version = "1.2.1" -description = "Python interface for 
libheif library" +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" optional = false -python-versions = ">=3.10" -groups = ["main"] +python-versions = ">=3.7" +groups = ["dev"] files = [ - {file = "pillow_heif-1.2.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:aef93f67030b953c401058b4735782b412787629054a4979809f721a27e74836"}, - {file = "pillow_heif-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:702f2ebf111fd13fc82c50685f0695f2bee3dd3ebd29305ddc49d6d2478e916b"}, - {file = "pillow_heif-1.2.1-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2d03c95b69bb4ca830ff2b58a9c3f7f43c61696a32a688f858fe0a9989d42c53"}, - {file = "pillow_heif-1.2.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:476f10c5785505d2ace0f86eccb2fa614b2c6ae49f636adc36cd48cbecf19e64"}, - {file = "pillow_heif-1.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5a6a72e28a8fa73457fb9c67fe8bb9f27053994e765337d21312ce23eaed3116"}, - {file = "pillow_heif-1.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e04ca9f833b42e2cfa2c72e9f8c6163e988a8b07d62f87e1f33c55f4c683138d"}, - {file = "pillow_heif-1.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:6a21dbdd1183aa44f6519c36557f8dc018d2f86c1ea3091b29008c3d7cb0db2a"}, - {file = "pillow_heif-1.2.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:c534c40708160f38a45bfe5abac1400370079edbb3bc8f23be0d51f556695a16"}, - {file = "pillow_heif-1.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dcc3ab9cbd7df179fe2b51569881732584cdc8cd306461b2cfa8416035137305"}, - {file = "pillow_heif-1.2.1-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba34dfe3ce0584b2f1b7653a075e18e4c97d72110d106b1e7aef5d702dec8045"}, - {file = "pillow_heif-1.2.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cdb956f4b3dc1d1fce2364f539f6b26d604bcb212055d0087c6ff1bd0668599"}, - {file = 
"pillow_heif-1.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8372ce54b76aff80eb1859d79699e7f972a7837d0e7fb4ce3350d25ac53890bc"}, - {file = "pillow_heif-1.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:110e14113a08aff5047c8c879a8ae3f284b93134e4b9b5b5a7734838030ba9a1"}, - {file = "pillow_heif-1.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2dfa2047dee77a8e8321a949bf9d0c53d03296afb459a5b03201e9d8af6dba36"}, - {file = "pillow_heif-1.2.1-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:c5a3c8fec8cf63f6d9170f092a210e76d584beef5a5b0f5e8fbfa171eb27520a"}, - {file = "pillow_heif-1.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:add430cf7f5340eaa70c2e57af59655515fd415b2b93dde0baec87be48debd0f"}, - {file = "pillow_heif-1.2.1-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f9a6daa0f88fe5fa76b72c848615836368d0577a108059e3070615c1e50551dc"}, - {file = "pillow_heif-1.2.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:35a355df6024f09b0e46b56bb5805c275a8ca7dc67e1da2be245aee3a70c82ec"}, - {file = "pillow_heif-1.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:33d84eb1c40d9c63d2ea869e6290f5b59ebf4421ed16090796be60b8e3b2a061"}, - {file = "pillow_heif-1.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2522a54df26f996993189326208513a6c8458ac89de51644a89b19fcda712539"}, - {file = "pillow_heif-1.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:0c965277fde806c7c628b16f9a45f4a7b10c32c390ce7d70c0572499a5d8426f"}, - {file = "pillow_heif-1.2.1-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:246acfade36d877fc7e01ccde03edaafd75e5aad66f889f484fc8ba7b651b688"}, - {file = "pillow_heif-1.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a746d38f2c54774fd680da45f2af56467b15f6b6c46962328ad1ed005d16ca6"}, - {file = "pillow_heif-1.2.1-cp313-cp313-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a93e374ff86ef61dc374a6d3c22e73fddc609e10b342802fa1674cc26db50859"}, - 
{file = "pillow_heif-1.2.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f8f0158a0368a38870deda5124d74086f8708268f335ddbdeb0890ef83ecd7ad"}, - {file = "pillow_heif-1.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5132c9c84e18ca800d559b79e389114b289899614c241e4399f8b677f1bbd3d7"}, - {file = "pillow_heif-1.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c31012a51fe3d67ee0c6c91549a5ee0590f3fa07b03882022238d0d0f052ad20"}, - {file = "pillow_heif-1.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:e27d7690a08f52c63295f5ca5e13b97bbe168f2f55e32794e3b24898a5270255"}, - {file = "pillow_heif-1.2.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a9083f80271130580e6f99f6b79204fc7f5ff61eefb83ac64c026c68f0000775"}, - {file = "pillow_heif-1.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b3584abf861d33a422a7bda1f2926131cbf4bbd2801390cb7f75f03ef3833a2d"}, - {file = "pillow_heif-1.2.1-cp314-cp314-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be790ce430645c3e0b148e873ed5ebeeb6d001ae685e8db40f77f43474ab9848"}, - {file = "pillow_heif-1.2.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:957060f8f2ceaa2e1fd41450da05bee87abc054a6247c02b53e9322ce4e53958"}, - {file = "pillow_heif-1.2.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fe231ca4c4e387785a97f2acf38a24474f3a0819b7e2234144cff9fa3de5d3ac"}, - {file = "pillow_heif-1.2.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f780890596161c7f43512377dda9106f793421565a376c70988355de5c4241de"}, - {file = "pillow_heif-1.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:eee8c933cce88dc8f6a01afc3befc159341fbc404a981c3759b3dca97b7f2dbb"}, - {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a1e6ae0b83068874ec8735e7fd066433fda77189facd158d750b820e24686454"}, - {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:b49dbcefa59c54d03cc2cfd98e3fbafa3aa38c3afa1ad719f2a5d6682fbe2752"}, - {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:39371c003a9ac657e0e083989b4a25f1eb4ad6a9ea01dd7ea85f93dd00ab9376"}, - {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:084e1d78a6a74efd41d2cb803554500067d509be3ff7f77b61140adeb9867660"}, - {file = "pillow_heif-1.2.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c0fa8c2ceec1fc83b45b8ea036add55706aa2d1f789acfece5f30b124f11fdb3"}, - {file = "pillow_heif-1.2.1.tar.gz", hash = "sha256:29be44d636269e2d779b4aec629bc056ec7260b734a16b4d3bb284c49c200274"}, + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] -[package.dependencies] -pillow = ">=11.1.0" - -[package.extras] -dev = ["coverage", "defusedxml", "numpy", "opencv-python (==4.13.0.92)", "packaging", "pre-commit", "pylint", "pympler", "pytest", "setuptools"] -docs = ["sphinx (>=4.4)", "sphinx-issues (>=3.0.1)", "sphinx-rtd-theme (>=1.0)"] -tests = ["defusedxml", "numpy", "packaging", "pympler", "pytest"] -tests-min = ["defusedxml", "packaging", "pytest"] - [[package]] name = "pluggy" version = "1.6.0" @@ -890,6 +860,18 @@ postgresql-psycopg2cffi = ["psycopg2cffi"] pymysql = ["pymysql (<1) ; python_version < \"3\"", "pymysql ; python_version >= \"3\""] sqlcipher = ["sqlcipher3-binary ; python_version >= \"3\""] +[[package]] +name = "typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = 
"typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, +] + [[package]] name = "urllib3" version = "1.26.17" @@ -910,4 +892,4 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "9f953e1dbbc8bc13aa7597380931c869f83d75db1bbaa0700527d77a0b0cde84" +content-hash = "8fdfafb375ef7b2e690dcb9c76b155903945907a274747a9e7e8eddebe95175f" diff --git a/pyproject.toml b/pyproject.toml index 594ff14..7e30654 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,6 +7,7 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.11" +alembic = "^1.13" slack-bolt = "^1.27.0" sqlalchemy = "<2.0" pymysql = "^1.1.2" diff --git a/samconfig.toml b/samconfig.toml index 145cbd0..dff16b5 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -7,6 +7,7 @@ version = 0.1 [default.build.parameters] +template_file = "infra/aws/template.yaml" use_container = true [default.deploy.parameters] diff --git a/syncbot/constants.py b/syncbot/constants.py index e71a198..73e55b2 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -25,13 +25,20 @@ SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" SLACK_SCOPES = "ENV_SLACK_SCOPES" SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET" -PASSWORD_ENCRYPT_KEY = "PASSWORD_ENCRYPT_KEY" +TOKEN_ENCRYPTION_KEY = "TOKEN_ENCRYPTION_KEY" REQUIRE_ADMIN = "REQUIRE_ADMIN" +# Database: backend-agnostic (mysql or sqlite) +DATABASE_BACKEND = "DATABASE_BACKEND" +DATABASE_URL = "DATABASE_URL" + +# Legacy MySQL-only vars (used when DATABASE_URL unset and backend is mysql) DATABASE_HOST = "DATABASE_HOST" ADMIN_DATABASE_USER = "ADMIN_DATABASE_USER" ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" +DATABASE_SSL_CA_PATH = "DATABASE_SSL_CA_PATH" +DATABASE_TLS_ENABLED = "DATABASE_TLS_ENABLED" # Name of env var that scopes the Reset Database button to one workspace. 
ENABLE_DB_RESET = "ENABLE_DB_RESET" @@ -89,27 +96,68 @@ def _has_real_bot_token() -> bool: # handles any requests. Fails fast in production; warns in local dev. # --------------------------------------------------------------------------- -# Required in all environments -_REQUIRED_ALWAYS = [ - DATABASE_HOST, - ADMIN_DATABASE_USER, - ADMIN_DATABASE_PASSWORD, - ADMIN_DATABASE_SCHEMA, -] +def get_database_backend() -> str: + """Return 'mysql' or 'sqlite'. Defaults to 'mysql' when unset for backward compatibility.""" + return os.environ.get(DATABASE_BACKEND, "mysql").lower().strip() or "mysql" + + +def _env_bool(name: str, default: bool) -> bool: + """Parse common boolean env values with a safe default.""" + value = os.environ.get(name) + if value is None: + return default + return value.strip().lower() in {"1", "true", "yes", "on"} + + +def database_tls_enabled() -> bool: + """Return True when MySQL TLS should be used. + + Defaults: + - local dev: disabled + - non-local: enabled + Can be overridden with DATABASE_TLS_ENABLED=true/false. + """ + default = not LOCAL_DEVELOPMENT + return _env_bool(DATABASE_TLS_ENABLED, default) + + +def database_ssl_ca_path() -> str: + """Return optional CA bundle path for DB TLS verification.""" + return os.environ.get(DATABASE_SSL_CA_PATH, "/etc/pki/tls/certs/ca-bundle.crt") + + +def get_required_db_vars() -> list: + """Return list of required env var names for the current database backend.""" + backend = get_database_backend() + if backend == "sqlite": + return [DATABASE_URL] + # mysql: require URL or legacy host/user/password/schema + if os.environ.get(DATABASE_URL): + return [] # URL is enough + return [ + DATABASE_HOST, + ADMIN_DATABASE_USER, + ADMIN_DATABASE_PASSWORD, + ADMIN_DATABASE_SCHEMA, + ] + + +# Required in all environments (non-DB vars; DB vars are backend-dependent) +_REQUIRED_ALWAYS_NON_DB: list = [] -# Required only in production (Lambda). OAuth uses MySQL; no S3 buckets. 
+# Required only in production (non-local deployments). _REQUIRED_PRODUCTION = [ SLACK_SIGNING_SECRET, SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, SLACK_SCOPES, - PASSWORD_ENCRYPT_KEY, + TOKEN_ENCRYPTION_KEY, ] def _encryption_active() -> bool: """Return True if bot-token encryption is configured with a real key.""" - key = os.environ.get(PASSWORD_ENCRYPT_KEY, "") + key = os.environ.get(TOKEN_ENCRYPTION_KEY, "") return bool(key) and key != "123" @@ -118,8 +166,9 @@ def validate_config() -> None: In production this raises immediately so the Lambda fails on cold-start rather than silently misbehaving. In local development it only warns. + DB requirements depend on DATABASE_BACKEND (mysql vs sqlite). """ - required = list(_REQUIRED_ALWAYS) + required = list(_REQUIRED_ALWAYS_NON_DB) + list(get_required_db_vars()) if not LOCAL_DEVELOPMENT: required.extend(_REQUIRED_PRODUCTION) @@ -136,5 +185,5 @@ def validate_config() -> None: if not LOCAL_DEVELOPMENT and not _encryption_active(): _logger.critical( "Bot-token encryption is DISABLED in production. " - "Set PASSWORD_ENCRYPT_KEY to a strong passphrase to encrypt tokens at rest." + "Set TOKEN_ENCRYPTION_KEY to a strong passphrase to encrypt tokens at rest." 
) diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index 0093e0c..ee49877 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -19,7 +19,7 @@ from typing import TypeVar from urllib.parse import quote_plus -from sqlalchemy import and_, create_engine, func, inspect, pool, text +from sqlalchemy import and_, create_engine, func, pool, text from sqlalchemy.exc import OperationalError from sqlalchemy.orm import sessionmaker @@ -43,15 +43,12 @@ class DatabaseField: _MAX_RETRIES = 2 _DB_INIT_MAX_ATTEMPTS = 15 _DB_INIT_RETRY_SECONDS = 2 -_MIGRATION_TABLE = "schema_migrations" -_BASELINE_VERSION = "000_init_sql" _PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent -_INIT_SQL_PATH = _PROJECT_ROOT / "db" / "init.sql" -_MIGRATIONS_DIR = _PROJECT_ROOT / "db" / "migrations" +_ALEMBIC_SCRIPT_LOCATION = _PROJECT_ROOT / "db" / "alembic" -def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: - """Build MySQL URL and connect_args for get_engine (no schema or with schema).""" +def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: + """Build MySQL URL and connect_args from legacy env vars.""" host = os.environ[constants.DATABASE_HOST] user = quote_plus(os.environ[constants.ADMIN_DATABASE_USER]) passwd = quote_plus(os.environ[constants.ADMIN_DATABASE_PASSWORD]) @@ -59,8 +56,8 @@ def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: path = f"/{schema}" if include_schema else "" db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306{path}?charset=utf8mb4" connect_args: dict = {} - if not constants.LOCAL_DEVELOPMENT: - ca_path = "/etc/pki/tls/certs/ca-bundle.crt" + if constants.database_tls_enabled(): + ca_path = constants.database_ssl_ca_path() try: ssl_ctx = ssl.create_default_context(cafile=ca_path) except (OSError, ssl.SSLError): @@ -69,78 +66,45 @@ def _build_base_url(include_schema: bool = False) -> tuple[str, dict]: return db_url, connect_args -def _sql_statements_from_file(sql_path: Path) -> 
list[str]: - """Parse a SQL file into executable statements. - - This parser intentionally supports the project's migration style - (line comments + semicolon-delimited statements). - """ - sql = sql_path.read_text() - lines = [] - for line in sql.splitlines(): - if "--" in line: - line = line[: line.index("--")].strip() - else: - line = line.strip() - if line: - lines.append(line) - combined = " ".join(lines) - return [stmt.strip() for stmt in combined.split(";") if stmt.strip()] - - -def _execute_sql_file(conn, sql_path: Path) -> None: - """Execute all statements from *sql_path* using the provided connection.""" - for stmt in _sql_statements_from_file(sql_path): - conn.execute(text(stmt)) - - -def _ensure_migration_table(engine) -> None: - """Create the migration tracking table if it does not exist.""" - with engine.begin() as conn: - conn.execute( - text( - """ - CREATE TABLE IF NOT EXISTS schema_migrations ( - version VARCHAR(255) PRIMARY KEY, - applied_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 - """ - ) - ) - - -def _migration_applied(engine, version: str) -> bool: - with engine.begin() as conn: - row = conn.execute( - text("SELECT version FROM schema_migrations WHERE version = :version"), - {"version": version}, - ).first() - return row is not None - - -def _record_migration(engine, version: str) -> None: - with engine.begin() as conn: - conn.execute( - text( - """ - INSERT INTO schema_migrations (version) - VALUES (:version) - ON DUPLICATE KEY UPDATE version = VALUES(version) - """ - ), - {"version": version}, - ) +def _get_database_url_and_args(schema: str = None) -> tuple[str, dict]: + """Return (url, connect_args) for the configured backend. 
Dialect-aware.""" + backend = constants.get_database_backend() + if backend == "sqlite": + url = os.environ.get(constants.DATABASE_URL) or "sqlite:///db.sqlite3" + # Ensure path is absolute for SQLite when file path is used + if url.startswith("sqlite:///") and not url.startswith("sqlite:////"): + path_part = url[10:] + if not path_part.startswith("/") and ":" not in path_part[:2]: + url = f"sqlite:///{_PROJECT_ROOT / path_part}" + connect_args = {"check_same_thread": False} + return url, connect_args + # mysql + if os.environ.get(constants.DATABASE_URL): + url = os.environ[constants.DATABASE_URL] + connect_args = {} + if constants.database_tls_enabled(): + ca_path = constants.database_ssl_ca_path() + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + connect_args["ssl"] = ssl_ctx + return url, connect_args + return _build_mysql_url(include_schema=True) -def _table_exists(engine, table_name: str) -> bool: - """Return True if *table_name* exists in the current schema.""" - return inspect(engine).has_table(table_name) +def _is_sqlite(engine) -> bool: + return engine.dialect.name == "sqlite" def _ensure_database_exists() -> None: - """Create the configured schema if it does not already exist.""" + """Create the configured schema if it does not already exist (MySQL only).""" + if constants.get_database_backend() != "mysql": + return + if os.environ.get(constants.DATABASE_URL): + return # URL already points at a database schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") - url_no_db, connect_args = _build_base_url(include_schema=False) + url_no_db, connect_args = _build_mysql_url(include_schema=False) engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) try: with engine_no_db.begin() as conn: @@ -149,49 +113,31 @@ def _ensure_database_exists() -> None: engine_no_db.dispose() +def _alembic_config(): + """Build Alembic config with 
script_location set to project db/alembic.""" + from alembic.config import Config # pyright: ignore[reportMissingImports] + config = Config() + config.set_main_option("script_location", str(_ALEMBIC_SCRIPT_LOCATION)) + return config + + +def _run_alembic_upgrade() -> None: + """Run Alembic upgrade head (fresh-install flow only; pre-release).""" + from alembic import command # pyright: ignore[reportMissingImports] + + config = _alembic_config() + command.upgrade(config, "head") + + def initialize_database() -> None: - """Initialize schema and apply migrations automatically. - - Behavior: - - Ensures the target database exists. - - Creates migration tracking table. - - Applies ``db/init.sql`` exactly once for fresh databases (or marks it as - baseline for already-initialized databases). - - Applies pending SQL migrations from ``db/migrations`` in filename order. - """ - if not _INIT_SQL_PATH.exists(): - raise FileNotFoundError(f"Missing init.sql at {_INIT_SQL_PATH}") + """Initialize schema via Alembic migrations (fresh install only; pre-release). + Ensures DB exists (MySQL only), then runs Alembic upgrade head. 
+ """ for attempt in range(1, _DB_INIT_MAX_ATTEMPTS + 1): try: _ensure_database_exists() - engine = get_engine() - - _ensure_migration_table(engine) - - if not _migration_applied(engine, _BASELINE_VERSION): - if _table_exists(engine, "workspaces"): - _logger.info("db_init_baseline_marked", extra={"version": _BASELINE_VERSION}) - _record_migration(engine, _BASELINE_VERSION) - else: - _logger.info("db_init_start", extra={"file": str(_INIT_SQL_PATH)}) - with engine.begin() as conn: - _execute_sql_file(conn, _INIT_SQL_PATH) - _record_migration(engine, _BASELINE_VERSION) - _logger.info("db_init_complete", extra={"version": _BASELINE_VERSION}) - - if _MIGRATIONS_DIR.exists(): - migration_files = sorted(p for p in _MIGRATIONS_DIR.glob("*.sql") if p.is_file()) - for migration_file in migration_files: - version = migration_file.name - if _migration_applied(engine, version): - continue - _logger.info("db_migration_start", extra={"version": version}) - with engine.begin() as conn: - _execute_sql_file(conn, migration_file) - _record_migration(engine, version) - _logger.info("db_migration_complete", extra={"version": version}) - + _run_alembic_upgrade() return except Exception as exc: if attempt >= _DB_INIT_MAX_ATTEMPTS: @@ -207,89 +153,97 @@ def initialize_database() -> None: time.sleep(_DB_INIT_RETRY_SECONDS) +def _drop_all_tables_dialect_aware(engine) -> None: + """Drop all tables in the current schema. 
MySQL: information_schema + FK off; SQLite: metadata reflect + drop.""" + if _is_sqlite(engine): + from sqlalchemy import MetaData + meta = MetaData() + meta.reflect(bind=engine) + with engine.begin() as conn: + for table in reversed(meta.sorted_tables): + table.drop(conn, checkfirst=True) + return + with engine.begin() as conn: + conn.execute(text("SET FOREIGN_KEY_CHECKS = 0")) + result = conn.execute( + text( + "SELECT TABLE_NAME FROM information_schema.TABLES " + "WHERE TABLE_SCHEMA = DATABASE()" + ) + ) + for (table_name,) in result: + conn.execute(text(f"DROP TABLE IF EXISTS `{table_name}`")) + conn.execute(text("SET FOREIGN_KEY_CHECKS = 1")) + + def drop_and_init_db() -> None: - """Drop the database and reinitialize from db/init.sql. All data is lost. + """Empty the current schema and reinitialize via Alembic. All data is lost. + Drops all tables dialect-aware, then runs Alembic upgrade head. Called from the "Reset Database" UI button (gated by ENABLE_DB_RESET). Resets GLOBAL_ENGINE and GLOBAL_SESSION so the next get_engine() uses a fresh DB. """ global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA _logger.critical( - "DB RESET: dropping database and reinitializing from init.sql. All data will be lost." + "DB RESET: emptying schema and reinitializing via Alembic. All data will be lost." 
) - schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") - url_no_db, connect_args = _build_base_url(include_schema=False) - engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) - - with engine_no_db.begin() as conn: - conn.execute(text(f"DROP DATABASE IF EXISTS `{schema}`")) - conn.execute(text(f"CREATE DATABASE `{schema}` CHARACTER SET utf8mb4")) - - engine_no_db.dispose() - - url_with_db, connect_args = _build_base_url(include_schema=True) - engine_with_db = create_engine(url_with_db, connect_args=connect_args, pool_pre_ping=True) - - init_path = _INIT_SQL_PATH - if not init_path.exists(): - _logger.error("drop_and_init_db: init.sql not found at %s", init_path) - engine_with_db.dispose() - return + db_url, connect_args = _get_database_url_and_args() + engine = create_engine( + db_url, + connect_args=connect_args, + poolclass=pool.NullPool if constants.get_database_backend() == "sqlite" else pool.QueuePool, + pool_pre_ping=constants.get_database_backend() == "mysql", + ) - with engine_with_db.begin() as conn: - _execute_sql_file(conn, init_path) + _drop_all_tables_dialect_aware(engine) - engine_with_db.dispose() + engine.dispose() GLOBAL_ENGINE = None GLOBAL_SESSION = None GLOBAL_SCHEMA = None - # Ensure baseline is re-recorded after reset. + # Recreate schema via Alembic upgrade head. initialize_database() - _logger.info("drop_and_init_db: database %s dropped and reinitialized from init.sql", schema) + _logger.info("drop_and_init_db: schema emptied and reinitialized via Alembic") def get_engine(echo: bool = False, schema: str = None): """Return the global SQLAlchemy engine, creating it on first call. - Uses QueuePool with pool_pre_ping so that stale connections (common - in Lambda warm containers) are detected and replaced transparently. + Uses QueuePool with pool_pre_ping for MySQL; NullPool for SQLite. 
""" global GLOBAL_ENGINE, GLOBAL_SCHEMA - target_schema = schema or os.environ[constants.ADMIN_DATABASE_SCHEMA] + backend = constants.get_database_backend() + target_schema = (schema or os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot")) if backend == "mysql" else "" + cache_key = target_schema or backend - if target_schema == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None: + if cache_key == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None: return GLOBAL_ENGINE - host = os.environ[constants.DATABASE_HOST] - user = quote_plus(os.environ[constants.ADMIN_DATABASE_USER]) - passwd = quote_plus(os.environ[constants.ADMIN_DATABASE_PASSWORD]) - - db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306/{target_schema}?charset=utf8mb4" + db_url, connect_args = _get_database_url_and_args(schema=target_schema or None) - connect_args: dict = {} - if not constants.LOCAL_DEVELOPMENT: - ca_path = "/etc/pki/tls/certs/ca-bundle.crt" - try: - ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): - ssl_ctx = ssl.create_default_context() - connect_args["ssl"] = ssl_ctx - - GLOBAL_ENGINE = create_engine( - db_url, - echo=echo, - poolclass=pool.QueuePool, - pool_size=1, - max_overflow=1, - pool_recycle=3600, - pool_pre_ping=True, - connect_args=connect_args, - ) - GLOBAL_SCHEMA = target_schema + if backend == "sqlite": + GLOBAL_ENGINE = create_engine( + db_url, + echo=echo, + poolclass=pool.NullPool, + connect_args=connect_args, + ) + else: + GLOBAL_ENGINE = create_engine( + db_url, + echo=echo, + poolclass=pool.QueuePool, + pool_size=1, + max_overflow=1, + pool_recycle=3600, + pool_pre_ping=True, + connect_args=connect_args, + ) + GLOBAL_SCHEMA = cache_key return GLOBAL_ENGINE diff --git a/syncbot/db/schemas.py b/syncbot/db/schemas.py index c011f35..a044a9b 100644 --- a/syncbot/db/schemas.py +++ b/syncbot/db/schemas.py @@ -187,7 +187,7 @@ def get_id(): class InstanceKey(BaseClass, GetDBClass): """This instance's Ed25519 keypair, auto-generated on first 
boot. - The private key is stored Fernet-encrypted using PASSWORD_ENCRYPT_KEY. + The private key is stored Fernet-encrypted using TOKEN_ENCRYPTION_KEY. The public key is shared with federated workspaces during connection setup. """ diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index 32cd763..d33e00a 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -511,7 +511,7 @@ def handle_db_reset_proceed( _logger.warning("Failed to update modal after DB reset: %s", e) _logger.critical( - "DB_RESET triggered by user %s — dropping database and reinitializing from init.sql", + "DB_RESET triggered by user %s — dropping database and reinitializing via Alembic", user_id, ) diff --git a/syncbot/helpers/encryption.py b/syncbot/helpers/encryption.py index cae496d..bb7429c 100644 --- a/syncbot/helpers/encryption.py +++ b/syncbot/helpers/encryption.py @@ -1,6 +1,6 @@ """Bot-token encryption / decryption using Fernet (AES-128-CBC + HMAC-SHA256). -The PASSWORD_ENCRYPT_KEY env var is stretched to a 32-byte key using +The TOKEN_ENCRYPTION_KEY env var is stretched to a 32-byte key using PBKDF2-HMAC-SHA256 with 600,000 iterations. The derived Fernet instance is cached so the expensive KDF runs at most once per key per process. 
""" @@ -39,7 +39,7 @@ def _get_fernet(key: str) -> Fernet: def _encryption_enabled() -> bool: """Return *True* if bot-token encryption is active.""" - key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") return bool(key) and key != "123" @@ -47,7 +47,7 @@ def encrypt_bot_token(token: str) -> str: """Encrypt a bot token before storing it in the database.""" if not _encryption_enabled(): return token - key = os.environ[constants.PASSWORD_ENCRYPT_KEY] + key = os.environ[constants.TOKEN_ENCRYPTION_KEY] return _get_fernet(key).encrypt(token.encode()).decode() @@ -58,7 +58,7 @@ def decrypt_bot_token(encrypted: str) -> str: """ if not _encryption_enabled(): return encrypted - key = os.environ[constants.PASSWORD_ENCRYPT_KEY] + key = os.environ[constants.TOKEN_ENCRYPTION_KEY] try: return _get_fernet(key).decrypt(encrypted.encode()).decode() except InvalidToken: diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py index bf08e90..43b5352 100644 --- a/syncbot/helpers/export_import.py +++ b/syncbot/helpers/export_import.py @@ -13,7 +13,7 @@ from decimal import Decimal from typing import Any -from sqlalchemy import text +from sqlalchemy import MetaData, Table, delete, select import constants from db import DbManager, get_engine, schemas @@ -23,45 +23,47 @@ BACKUP_VERSION = 1 MIGRATION_VERSION = 1 _RAW_BACKUP_TABLES = ("slack_bots", "slack_installations", "slack_oauth_states") +_DATETIME_COLUMNS = frozenset({ + "bot_token_expires_at", + "user_token_expires_at", + "installed_at", + "expire_at", +}) def _dump_raw_table(table_name: str) -> list[dict]: - """Return all rows from a non-ORM table as dictionaries.""" + """Return all rows from a non-ORM table as dictionaries (dialect-neutral via reflection).""" engine = get_engine() + meta = MetaData() + table = Table(table_name, meta, autoload_with=engine) with engine.connect() as conn: - rows = conn.execute(text(f"SELECT * FROM 
`{table_name}`")).mappings().all() + rows = conn.execute(select(table)).mappings().all() return [dict(row) for row in rows] def _restore_raw_table(table_name: str, rows: list[dict]) -> None: - """Replace table contents for a non-ORM table from backup rows.""" + """Replace table contents for a non-ORM table from backup rows (dialect-neutral).""" engine = get_engine() + meta = MetaData() + table = Table(table_name, meta, autoload_with=engine) with engine.begin() as conn: - conn.execute(text(f"DELETE FROM `{table_name}`")) + conn.execute(delete(table)) for row in rows: if not row: continue parsed: dict[str, Any] = {} for key, value in row.items(): - if isinstance(value, str) and key in { - "bot_token_expires_at", - "user_token_expires_at", - "installed_at", - "expire_at", - }: + if key not in table.c: + continue + if isinstance(value, str) and key in _DATETIME_COLUMNS: try: parsed[key] = datetime.fromisoformat(value.replace("Z", "+00:00")) except ValueError: parsed[key] = value else: parsed[key] = value - - cols = ", ".join(f"`{k}`" for k in parsed) - placeholders = ", ".join(f":{k}" for k in parsed) - conn.execute( - text(f"INSERT INTO `{table_name}` ({cols}) VALUES ({placeholders})"), - parsed, - ) + if parsed: + conn.execute(table.insert().values(**parsed)) def _json_serializer(obj: Any) -> Any: @@ -84,16 +86,16 @@ def canonical_json_dumps(obj: dict) -> bytes: def _compute_encryption_key_hash() -> str | None: - """SHA-256 hex of PASSWORD_ENCRYPT_KEY, or None if unset.""" - key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + """SHA-256 hex of TOKEN_ENCRYPTION_KEY, or None if unset.""" + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") if not key or key == "123": return None return hashlib.sha256(key.encode()).hexdigest() def _compute_backup_hmac(payload_without_hmac: dict) -> str: - """HMAC-SHA256 of canonical JSON of payload (excluding hmac field), keyed by PASSWORD_ENCRYPT_KEY.""" - key = os.environ.get(constants.PASSWORD_ENCRYPT_KEY, "") + 
"""HMAC-SHA256 of canonical JSON of payload (excluding hmac field), keyed by TOKEN_ENCRYPTION_KEY.""" + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") if not key: key = "" raw = canonical_json_dumps(payload_without_hmac) diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py index f8bf5b0..4dc472c 100644 --- a/syncbot/helpers/notifications.py +++ b/syncbot/helpers/notifications.py @@ -153,7 +153,7 @@ def notify_synced_channels(client: WebClient, channel_ids: list[str], message: s def purge_stale_soft_deletes() -> int: """Permanently delete workspaces that have been soft-deleted beyond the retention period. - Returns 0 without raising if the schema is missing (e.g. fresh DB before init.sql). + Returns 0 without raising if the schema is missing (e.g. fresh DB before Alembic bootstrap). """ from helpers.workspace import get_workspace_by_id diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py index b0270d2..4b67906 100644 --- a/syncbot/helpers/oauth.py +++ b/syncbot/helpers/oauth.py @@ -16,9 +16,9 @@ def get_oauth_flow(): - """Build the Slack OAuth flow using MySQL-backed stores. + """Build the Slack OAuth flow using SQLAlchemy-backed stores. - Uses the same RDS/MySQL connection as the rest of the app. Works for both + Uses the same database engine as the rest of the app. Works for both local development and production (Lambda). If OAuth credentials are not set and LOCAL_DEVELOPMENT is true, returns None (single-workspace mode). 
""" diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index ccfd8d3..5d3dded 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -4,8 +4,6 @@ charset-normalizer==3.4.4 ; python_version >= "3.11" and python_version < "4.0" cryptography==46.0.5 ; python_version >= "3.11" and python_version < "4.0" greenlet==3.1.1 ; python_version >= "3.11" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" idna==3.11 ; python_version >= "3.11" and python_version < "4.0" -pillow-heif==1.2.0 ; python_version >= "3.11" and python_version < "4.0" -pillow==12.1.1 ; python_version >= "3.11" and python_version < "4.0" pycparser==2.23 ; python_version >= "3.11" and python_version < "4.0" pymysql==1.1.2 ; python_version >= "3.11" and python_version < "4.0" python-dotenv==1.2.1 ; python_version >= "3.11" and python_version < "4.0" diff --git a/tests/test_db.py b/tests/test_db.py index 9b9616e..11ac10d 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -1,4 +1,4 @@ -"""Unit tests for ``syncbot/db`` connection pooling and retry logic.""" +"""Unit tests for ``syncbot/db`` connection pooling, retry logic, and backend parity (MySQL/SQLite).""" import os from unittest.mock import patch @@ -11,6 +11,7 @@ os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") +from sqlalchemy import inspect from sqlalchemy.exc import OperationalError from db import _MAX_RETRIES, _with_retry @@ -78,13 +79,15 @@ class TestEngineConfig: @patch.dict( os.environ, { + "DATABASE_BACKEND": "mysql", "DATABASE_HOST": "localhost", "ADMIN_DATABASE_USER": "root", "ADMIN_DATABASE_PASSWORD": "test", "ADMIN_DATABASE_SCHEMA": "syncbot", }, + clear=False, ) - def test_engine_uses_queue_pool(self): + def test_engine_uses_queue_pool_mysql(self): 
from sqlalchemy.pool import QueuePool import db as db_mod @@ -103,3 +106,84 @@ def test_engine_uses_queue_pool(self): engine.dispose() db_mod.GLOBAL_ENGINE = old_engine db_mod.GLOBAL_SCHEMA = old_schema + + @patch.dict( + os.environ, + { + "DATABASE_BACKEND": "sqlite", + "DATABASE_URL": "sqlite:///:memory:", + }, + clear=False, + ) + def test_engine_uses_null_pool_sqlite(self): + from sqlalchemy.pool import NullPool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = get_engine() + assert isinstance(engine.pool, NullPool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + + +# ----------------------------------------------------------------------- +# Backend parity: SQLite bootstrap and required vars +# ----------------------------------------------------------------------- + + +class TestBackendParity: + @pytest.mark.parametrize("sqlite_url", ["sqlite:///test_bootstrap.db"]) + @patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False) + def test_sqlite_initialize_database_creates_tables(self, sqlite_url): + import db as db_mod + from db import get_engine, initialize_database + + os.environ["DATABASE_URL"] = sqlite_url + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + initialize_database() + engine = get_engine() + insp = inspect(engine) + assert insp.has_table("workspaces") + assert insp.has_table("alembic_version") + assert insp.has_table("slack_bots") + finally: + if db_mod.GLOBAL_ENGINE: + db_mod.GLOBAL_ENGINE.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + if "DATABASE_URL" in os.environ and "test_bootstrap" in os.environ["DATABASE_URL"]: + try: + 
(__import__("pathlib").Path("test_bootstrap.db")).unlink(missing_ok=True) + except Exception: + pass + + def test_get_required_db_vars_mysql_without_url(self): + with patch.dict(os.environ, {"DATABASE_BACKEND": "mysql"}, clear=False): + if "DATABASE_URL" in os.environ: + del os.environ["DATABASE_URL"] + from constants import get_required_db_vars + + required = get_required_db_vars() + assert "DATABASE_HOST" in required + assert "ADMIN_DATABASE_USER" in required + + def test_get_required_db_vars_sqlite(self): + with patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False): + from constants import get_required_db_vars + + required = get_required_db_vars() + assert required == ["DATABASE_URL"] diff --git a/tests/test_helpers.py b/tests/test_helpers.py index 8700339..fe4d995 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -61,7 +61,7 @@ def test_deeply_nested(self): class TestEncryption: - @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "my-secret-key"}) + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "my-secret-key"}) def test_encrypt_decrypt_roundtrip(self): # Use a non-secret placeholder; encryption accepts any string token = "xoxb-0-0" @@ -70,12 +70,12 @@ def test_encrypt_decrypt_roundtrip(self): decrypted = helpers.decrypt_bot_token(encrypted) assert decrypted == token - @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "my-secret-key"}) + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "my-secret-key"}) def test_decrypt_invalid_token_raises(self): with pytest.raises(ValueError, match="decryption failed"): helpers.decrypt_bot_token("not-a-valid-encrypted-token") - @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "123"}) + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "123"}) def test_encryption_disabled_with_default_key(self): token = "xoxb-0-0" assert helpers.encrypt_bot_token(token) == token @@ -83,18 +83,18 @@ def test_encryption_disabled_with_default_key(self): @patch.dict(os.environ, {}, clear=False) def 
test_encryption_disabled_when_key_missing(self): - os.environ.pop("PASSWORD_ENCRYPT_KEY", None) + os.environ.pop("TOKEN_ENCRYPTION_KEY", None) token = "xoxb-0-0" assert helpers.encrypt_bot_token(token) == token assert helpers.decrypt_bot_token(token) == token - @patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "key-A"}) + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "key-A"}) def test_wrong_key_raises(self): token = "xoxb-0-0" encrypted = helpers.encrypt_bot_token(token) with ( - patch.dict(os.environ, {"PASSWORD_ENCRYPT_KEY": "key-B"}), + patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "key-B"}), pytest.raises(ValueError, match="decryption failed"), ): helpers.decrypt_bot_token(encrypted) From 5e2b3decb6e2e8ceb8d1caf2f6c699468dc6a394 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sun, 15 Mar 2026 22:23:18 -0500 Subject: [PATCH 12/45] Auto-generate token encryption key during deploy. --- .github/workflows/deploy-aws.yml | 19 ++++++++--- docs/DEPLOYMENT.md | 20 ++++++++++-- docs/INFRA_CONTRACT.md | 2 +- infra/aws/scripts/print-bootstrap-outputs.sh | 6 ++++ infra/aws/template.yaml | 33 ++++++++++++++++---- infra/gcp/README.md | 5 ++- infra/gcp/main.tf | 11 +++++++ infra/gcp/outputs.tf | 5 +++ infra/gcp/scripts/print-bootstrap-outputs.sh | 6 ++++ infra/gcp/variables.tf | 7 +++++ 10 files changed, 100 insertions(+), 14 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 5a11f2e..45ec7d8 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -1,5 +1,6 @@ # Deploy SyncBot to AWS (SAM). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. # To use GCP instead: set repository variable DEPLOY_TARGET=gcp and disable this workflow. +# Optional DR secret: TOKEN_ENCRYPTION_KEY_OVERRIDE (passes TokenEncryptionKeyOverride on deploy). 
name: Deploy (AWS) @@ -62,6 +63,12 @@ jobs: - name: Deploy to test run: | + TOKEN_KEY_OVERRIDE="${{ secrets.TOKEN_ENCRYPTION_KEY_OVERRIDE }}" + OVERRIDE_PARAM="" + if [ -n "$TOKEN_KEY_OVERRIDE" ]; then + OVERRIDE_PARAM=" TokenEncryptionKeyOverride=$TOKEN_KEY_OVERRIDE" + echo "Using TOKEN_ENCRYPTION_KEY_OVERRIDE for disaster-recovery deploy." + fi sam deploy \ -t .aws-sam/build/template.yaml \ --no-confirm-changeset \ @@ -79,8 +86,7 @@ jobs: DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ - DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ - TokenEncryptionKey=${{ secrets.TOKEN_ENCRYPTION_KEY }}" + DatabasePassword=${{ secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" sam-deploy-prod: if: github.ref == 'refs/heads/prod' @@ -105,6 +111,12 @@ jobs: - name: Deploy to prod run: | + TOKEN_KEY_OVERRIDE="${{ secrets.TOKEN_ENCRYPTION_KEY_OVERRIDE }}" + OVERRIDE_PARAM="" + if [ -n "$TOKEN_KEY_OVERRIDE" ]; then + OVERRIDE_PARAM=" TokenEncryptionKeyOverride=$TOKEN_KEY_OVERRIDE" + echo "Using TOKEN_ENCRYPTION_KEY_OVERRIDE for disaster-recovery deploy." + fi sam deploy \ -t .aws-sam/build/template.yaml \ --no-confirm-changeset \ @@ -122,5 +134,4 @@ jobs: DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ - DatabasePassword=${{ secrets.DATABASE_PASSWORD }} \ - TokenEncryptionKey=${{ secrets.TOKEN_ENCRYPTION_KEY }}" + DatabasePassword=${{ secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 5a61014..9dd4505 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -94,9 +94,19 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa Use the bootstrap **DeploymentBucketName**. Set parameters (Stage, DB, Slack, etc.) when prompted. -3. **GitHub:** Create environments `test` and `prod`. 
In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `DATABASE_USER`, `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `DATABASE_PASSWORD`, `TOKEN_ENCRYPTION_KEY`. No access keys — the workflow uses OIDC. +3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `DATABASE_USER`, `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `DATABASE_PASSWORD`. No access keys — the workflow uses OIDC. 4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). +**Important:** `TOKEN_ENCRYPTION_KEY` is generated once and stored in Secrets Manager by the app stack. Back up the secret value after first deploy. If this key is lost, existing workspaces must reinstall to re-authorize bot tokens. + +**Disaster recovery:** if you must rebuild and keep existing encrypted tokens working, deploy with the old key: + +```bash +sam deploy ... --parameter-overrides "... TokenEncryptionKeyOverride=<OLD_KEY>" +``` + +If using GitHub Actions, set optional secret `TOKEN_ENCRYPTION_KEY_OVERRIDE`; the AWS workflow will pass it automatically. + --- ### Download and Deploy (AWS, local) @@ -132,7 +142,13 @@ terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" ``` -Set Secret Manager secret values for Slack and DB (see [infra/gcp/README.md](../infra/gcp/README.md)). Set **cloud_run_image** after building and pushing your container image.
Capture outputs for CI: **service_url**, **region**, **project_id**, **artifact_registry_repository**, **deploy_service_account_email**. +Set Secret Manager secret values for Slack and DB (see [infra/gcp/README.md](../infra/gcp/README.md)). `TOKEN_ENCRYPTION_KEY` is auto-generated once and stored in Secret Manager during apply. Set **cloud_run_image** after building and pushing your container image. Capture outputs for CI: **service_url**, **region**, **project_id**, **artifact_registry_repository**, **deploy_service_account_email**. + +**Disaster recovery:** if rebuilding and you need to preserve existing token decryption, re-apply with: + +```bash +terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" -var='token_encryption_key_override=<OLD_KEY>' +``` Helper script for GitHub vars: diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index ec38b88..c346f15 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -33,7 +33,7 @@ The application reads configuration from environment variables. Providers must i | `ENV_SLACK_CLIENT_ID` | Slack OAuth client ID. | | `ENV_SLACK_CLIENT_SECRET` | Slack OAuth client secret. | | `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes (see `.env.example`). | -| `TOKEN_ENCRYPTION_KEY` | Passphrase for bot-token encryption at rest (any value except `123` to enable). | +| `TOKEN_ENCRYPTION_KEY` | Passphrase for bot-token encryption at rest. Should be generated once, stored in secret manager, and persisted/backed up.
| ### Optional diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh index 19a89e0..c712505 100755 --- a/infra/aws/scripts/print-bootstrap-outputs.sh +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -29,3 +29,9 @@ echo "--- GitHub Actions variables (set these per environment) ---" echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRoleArn"{print $2}')" echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}')" echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" +echo "" +echo "WARNING: TOKEN_ENCRYPTION_KEY is generated once in app-stack Secrets Manager." +echo "Back up this secret value after first app deploy." +echo "If the key is lost, existing workspaces must reinstall the app to re-authorize tokens." +echo "Expected secret name after app deploy: syncbot-<stage>-token-encryption-key" +echo "Disaster recovery: pass the old key as SAM parameter TokenEncryptionKeyOverride=<OLD_KEY>." diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index ca125da..8faa46e 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -102,13 +102,14 @@ Parameters: Type: String Default: "10.0.0.0/16" - # --- Security --- - - TokenEncryptionKey: - Description: Encryption key for bot and OAuth tokens
Type: String NoEcho: true - Default: "123" + Default: "" RequireAdmin: Description: > @@ -126,6 +127,7 @@ Parameters: Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] + HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] Mappings: StagesMap: @@ -297,6 +299,18 @@ Resources: - Key: Name Value: !Sub "syncbot-${Stage}-db" + TokenEncryptionKeySecret: + Type: AWS::SecretsManager::Secret + DeletionPolicy: Retain + UpdateReplacePolicy: Retain + Properties: + Name: !Sub "syncbot-${Stage}-token-encryption-key" + Description: !Sub "SyncBot ${Stage} token encryption key (backup required)" + GenerateSecretString: + PasswordLength: 48 + ExcludePunctuation: true + IncludeSpace: false + # ============================================================ # Lambda Function # ============================================================ @@ -352,7 +366,10 @@ Resources: ADMIN_DATABASE_USER: !Ref DatabaseUser ADMIN_DATABASE_PASSWORD: !Ref DatabasePassword ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema - TOKEN_ENCRYPTION_KEY: !Ref TokenEncryptionKey + TOKEN_ENCRYPTION_KEY: !If + - HasTokenEncryptionKeyOverride + - !Ref TokenEncryptionKeyOverride + - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" REQUIRE_ADMIN: !Ref RequireAdmin # ============================================================ @@ -471,3 +488,7 @@ Outputs: Condition: CreateDatabase Description: VPC ID (only when VPC is created by this stack) Value: !Ref VPC + + TokenEncryptionSecretArn: + Description: Secrets Manager ARN containing TOKEN_ENCRYPTION_KEY + Value: !Ref TokenEncryptionKeySecret diff --git a/infra/gcp/README.md b/infra/gcp/README.md index 7fd3736..039eb70 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -25,9 +25,12 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. 
Satisfies the [infras ```bash echo -n "YOUR_SLACK_SIGNING_SECRET" | gcloud secrets versions add syncbot-test-syncbot-slack-signing-secret --data-file=- - # Repeat for ENV_SLACK_CLIENT_ID, ENV_SLACK_CLIENT_SECRET, ENV_SLACK_SCOPES, syncbot-token-encryption-key, syncbot-db-password (if existing DB) + # Repeat for ENV_SLACK_CLIENT_ID, ENV_SLACK_CLIENT_SECRET, ENV_SLACK_SCOPES, syncbot-db-password (if existing DB) ``` + `TOKEN_ENCRYPTION_KEY` is generated once automatically by Terraform and stored in Secret Manager. Back it up. If lost, existing workspaces must reinstall to re-authorize bot tokens. + For disaster recovery, restore with `-var='token_encryption_key_override=<OLD_KEY>'`. + 3. **Set the Cloud Run image** By default the service uses a placeholder image. Build and push your SyncBot image to Artifact Registry, then: diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 71f531b..25992dc 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -162,6 +162,11 @@ resource "random_password" "db" { special = false } +resource "random_password" "token_encryption_key" { + length = 48 + special = false +} + resource "google_sql_database_instance" "main" { count = var.use_existing_database ? 0 : 1 project = var.project_id @@ -212,6 +217,12 @@ resource "google_secret_manager_secret_version" "db_password" { secret_data = random_password.db[0].result } +# Generate TOKEN_ENCRYPTION_KEY once and persist in Secret Manager. +resource "google_secret_manager_secret_version" "token_encryption_key" { + secret = google_secret_manager_secret.app_secrets[var.secret_token_encryption_key].id + secret_data = var.token_encryption_key_override != "" ?
var.token_encryption_key_override : random_password.token_encryption_key.result +} + # --------------------------------------------------------------------------- # Cloud Run service # --------------------------------------------------------------------------- diff --git a/infra/gcp/outputs.tf b/infra/gcp/outputs.tf index b5b3516..cb623cd 100644 --- a/infra/gcp/outputs.tf +++ b/infra/gcp/outputs.tf @@ -42,3 +42,8 @@ output "database_connection_name" { description = "Cloud SQL connection name (when not using existing DB)" value = var.use_existing_database ? null : (length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].connection_name : null) } + +output "token_encryption_secret_name" { + description = "Secret Manager secret name containing TOKEN_ENCRYPTION_KEY" + value = google_secret_manager_secret.app_secrets[var.secret_token_encryption_key].name +} diff --git a/infra/gcp/scripts/print-bootstrap-outputs.sh b/infra/gcp/scripts/print-bootstrap-outputs.sh index 93164f2..f923665 100755 --- a/infra/gcp/scripts/print-bootstrap-outputs.sh +++ b/infra/gcp/scripts/print-bootstrap-outputs.sh @@ -33,3 +33,9 @@ echo "Artifact Registry = $(terraform output -raw artifact_registry_repository echo "Service URL = $(terraform output -raw service_url 2>/dev/null || echo '')" echo "" echo "For deploy-gcp.yml also set: GCP_WORKLOAD_IDENTITY_PROVIDER (after configuring WIF for GitHub)." +echo "" +echo "WARNING: TOKEN_ENCRYPTION_KEY is generated once and stored in Secret Manager." +echo "Back up the secret value (or ensure durable secret backup/replication)." +echo "If this key is lost, existing workspaces must reinstall the app to re-authorize tokens." +echo "Secret name: $(terraform output -raw token_encryption_secret_name 2>/dev/null || echo '')" +echo "Disaster recovery: re-apply with -var='token_encryption_key_override=<OLD_KEY>' to preserve decryption."
diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index c0be008..678323e 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -129,6 +129,13 @@ variable "secret_token_encryption_key" { description = "Secret Manager secret ID for TOKEN_ENCRYPTION_KEY" } +variable "token_encryption_key_override" { + type = string + default = "" + sensitive = true + description = "Optional disaster-recovery override for TOKEN_ENCRYPTION_KEY. Leave empty for normal deploys." +} + variable "secret_db_password" { type = string default = "syncbot-db-password" From 6293eb7cd38ef79d85e1b2d7e59b4abac9872742 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 16 Mar 2026 07:09:52 -0500 Subject: [PATCH 13/45] Fixed dotenv req. Added ability to reuse existing DB during deploy. --- .github/workflows/deploy-aws.yml | 4 + docs/DEPLOYMENT.md | 47 ++++++++++- infra/aws/db_setup/handler.py | 119 ++++++++++++++++++++++++++++ infra/aws/db_setup/requirements.txt | 1 + infra/aws/template.bootstrap.yaml | 8 +- infra/aws/template.yaml | 77 ++++++++++++++++-- poetry.lock | 23 +++++- pyproject.toml | 1 + 8 files changed, 261 insertions(+), 19 deletions(-) create mode 100644 infra/aws/db_setup/handler.py create mode 100644 infra/aws/db_setup/requirements.txt diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 45ec7d8..ae7d79e 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -82,6 +82,8 @@ jobs: --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ + ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ + ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ DatabaseUser=${{ vars.DATABASE_USER }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ @@ -130,6 +132,8 @@ jobs: --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ ExistingDatabaseHost=${{ 
vars.EXISTING_DATABASE_HOST }} \ + ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ + ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ DatabaseUser=${{ vars.DATABASE_USER }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 9dd4505..a144273 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -65,7 +65,7 @@ aws cloudformation deploy \ --region us-east-2 ``` -Replace `YOUR_GITHUB_OWNER/YOUR_REPO` with your repo. Optionally set `CreateOIDCProvider=false` if the account already has the GitHub OIDC provider. +Replace `YOUR_GITHUB_OWNER/YOUR_REPO` with your repo. Optionally set `CreateOIDCProvider=false` if the account already has the GitHub OIDC provider. The bootstrap template only accepts `GitHubRepository`, `CreateOIDCProvider`, and `DeploymentBucketPrefix` (database options go in the main app deploy, not bootstrap). **Capture outputs:** @@ -94,11 +94,52 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa Use the bootstrap **DeploymentBucketName**. Set parameters (Stage, DB, Slack, etc.) when prompted. -3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `DATABASE_USER`, `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `DATABASE_PASSWORD`. No access keys — the workflow uses OIDC. +3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. 
Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. 4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). **Important:** `TOKEN_ENCRYPTION_KEY` is generated once and stored in Secrets Manager by the app stack. Back up the secret value after first deploy. If this key is lost, existing workspaces must reinstall to re-authorize bot tokens. +#### Using an existing RDS host (AWS) + +To **reuse only the DB host** and have the deploy create the schema and a dedicated app user (and generated password) for you: + +1. **What the stack does:** + When you set **ExistingDatabaseHost**, the template skips creating VPC, subnets, and RDS. A custom resource runs during deploy: it connects to your existing MySQL with a **bootstrap** (master) user you provide, creates the schema, creates an app user `syncbot_` with a **generated** password (stored in Secrets Manager), and grants that user full access to the schema. The app Lambda then uses that app user and generated password. You never manage the app DB password. + +2. **What you provide:** + - **Host:** The RDS endpoint (e.g. `mydb.xxxx.us-east-2.rds.amazonaws.com`). No `:3306` or path. + - **Admin user & password:** A MySQL user that can create databases and users (e.g. RDS master). Used only by the deploy step; the app uses a separate `syncbot_` user. + - **Schema name:** A dedicated schema per app or environment (e.g. `syncbot_test`, `syncbot_prod`). The deploy creates this schema and the app user with full access to it; the app runs Alembic migrations on startup. + +3. **Connectivity:** + When using an existing host, Lambda is **not** put in a VPC. It can only reach **publicly accessible** endpoints. 
Your RDS must be: + - Set to **publicly accessible** (in RDS settings), and + - Protected by a security group that allows **inbound TCP 3306** from the internet (or restrict to known IPs). + For production, consider a VPC-enabled Lambda and private RDS; that would require template changes. + +4. **First deploy (local `sam deploy`):** + Pass the **existing-host** parameters (admin user/password; do **not** pass `DatabaseUser`/`DatabasePassword` for the app — they are created for you): + ```bash + sam deploy --guided ... \ + --parameter-overrides \ + ExistingDatabaseHost=your-db.xxxx.us-east-2.rds.amazonaws.com \ + ExistingDatabaseAdminUser=admin \ + ExistingDatabaseAdminPassword=your_master_password \ + DatabaseSchema=syncbot_test \ + SlackSigningSecret=... \ + SlackClientSecret=... + ``` + Omit **ExistingDatabaseHost** (or leave it empty) to have the stack create a new RDS instance; then you must pass **DatabaseUser** and **DatabasePassword** for the new RDS master. + +5. **GitHub Actions:** + For **existing host** (deploy creates schema and app user), set **Variables**: + - **EXISTING_DATABASE_HOST** — Full RDS hostname. Leave **empty** to create a new RDS instead. + - **EXISTING_DATABASE_ADMIN_USER** — MySQL user that can create DBs/users (e.g. master). + - **DATABASE_SCHEMA** — Schema name (e.g. `syncbot_test` or `syncbot_prod`). + Set **Secrets**: + - **EXISTING_DATABASE_ADMIN_PASSWORD** — Password for the admin user. + For **new RDS** (stack creates the instance), set **DATABASE_USER**, **DATABASE_SCHEMA**, and secret **DATABASE_PASSWORD** instead, and leave **EXISTING_DATABASE_HOST** empty. The workflow passes all of these; the template uses the right set based on whether **EXISTING_DATABASE_HOST** is set. + **Disaster recovery:** if you must rebuild and keep existing encrypted tokens working, deploy with the old key: ```bash @@ -223,4 +264,4 @@ Schema is managed by **Alembic** (see `db/alembic/`). 
On startup the app runs ** ## Sharing infrastructure across apps (AWS) -To use an existing RDS instance instead of creating one per stack, set **ExistingDatabaseHost** in parameter overrides and use a **different DatabaseSchema** per app (e.g. `syncbot_test`, `syncbot_prod`). SyncBot creates the schema and runs migrations on startup. API Gateway and Lambda are per stack; free-tier quotas are account-wide. +To use an existing RDS instance instead of creating one per stack, see **[Using an existing RDS host (AWS)](#using-an-existing-rds-host-aws)**. Set **ExistingDatabaseHost** and use a **different DatabaseSchema** per app or environment (e.g. `syncbot_test`, `syncbot_prod`). API Gateway and Lambda are per stack; free-tier quotas are account-wide. diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py new file mode 100644 index 0000000..09935fe --- /dev/null +++ b/infra/aws/db_setup/handler.py @@ -0,0 +1,119 @@ +""" +Custom CloudFormation resource: create schema and app user on an existing MySQL (RDS) host. + +Run during stack create/update when ExistingDatabaseHost is set. Uses bootstrap credentials +to create the schema and a dedicated app user; the app password is read from the generated +Secrets Manager secret (created by the template) so the app Lambda can use it. 
+""" + +import json +import os +import boto3 +import pymysql +from pymysql.cursors import DictCursor + +# CloudFormation custom resource response helper (no cfnresponse in Lambda by default for Python 3) +def send(event, context, status, data=None, reason=None, physical_resource_id=None): + import urllib.request + pid = physical_resource_id or event.get("PhysicalResourceId") or event["LogicalResourceId"] + body = json.dumps({ + "Status": status, + "Reason": reason or f"See CloudWatch Log Stream: {context.log_stream_name}", + "PhysicalResourceId": pid, + "StackId": event["StackId"], + "RequestId": event["RequestId"], + "LogicalResourceId": event["LogicalResourceId"], + "Data": data or {}, + }).encode("utf-8") + req = urllib.request.Request( + event["ResponseURL"], + data=body, + method="PUT", + headers={"Content-Type": ""}, + ) + with urllib.request.urlopen(req) as f: + f.read() + + +def handler(event, context): + request_type = event.get("RequestType", "Create") + props = event.get("ResourceProperties", {}) + host = props.get("Host", "").strip() + admin_user = (props.get("AdminUser") or "").strip() + admin_password = props.get("AdminPassword") or "" + schema = (props.get("Schema") or "syncbot").strip() + stage = (props.get("Stage") or "test").strip() + secret_arn = (props.get("SecretArn") or "").strip() + + if request_type == "Delete": + # Leave schema and user for manual cleanup if desired + send(event, context, "SUCCESS", {"Username": ""}, physical_resource_id=event.get("PhysicalResourceId", "n/a")) + return + + if not all([host, admin_user, admin_password, schema, stage, secret_arn]): + send( + event, context, "FAILED", + reason="Missing Host, AdminUser, AdminPassword, Schema, Stage, or SecretArn", + ) + return + + app_username = f"syncbot_{stage}".replace("-", "_") + try: + app_password = get_app_password(secret_arn) + except Exception as e: + send(event, context, "FAILED", reason=f"GetSecretValue failed: {e}") + return + + try: + setup_database( + host=host, + 
admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + ) + except Exception as e: + send(event, context, "FAILED", reason=f"Database setup failed: {e}") + return + + send(event, context, "SUCCESS", {"Username": app_username}, reason="OK", physical_resource_id=app_username) + return {"Username": app_username} + + +def get_app_password(secret_arn: str) -> str: + client = boto3.client("secretsmanager") + resp = client.get_secret_value(SecretId=secret_arn) + return (resp.get("SecretString") or "").strip() + + +def setup_database( + *, + host: str, + admin_user: str, + admin_password: str, + schema: str, + app_username: str, + app_password: str, +) -> None: + conn = pymysql.connect( + host=host, + user=admin_user, + password=admin_password, + port=3306, + charset="utf8mb4", + cursorclass=DictCursor, + ) + try: + with conn.cursor() as cur: + cur.execute(f"CREATE DATABASE IF NOT EXISTS `{schema}`") + # MySQL 5.7: CREATE USER ... 
IDENTIFIED BY; 8.0 supports IF NOT EXISTS + cur.execute( + "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", + (app_username, app_password), + ) + cur.execute(f"GRANT ALL PRIVILEGES ON `{schema}`.* TO %s@'%%'", (app_username,)) + cur.execute("FLUSH PRIVILEGES") + conn.commit() + finally: + conn.close() diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt new file mode 100644 index 0000000..76db31a --- /dev/null +++ b/infra/aws/db_setup/requirements.txt @@ -0,0 +1 @@ +pymysql>=1.1.0 diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml index 89b65f6..7345bd2 100644 --- a/infra/aws/template.bootstrap.yaml +++ b/infra/aws/template.bootstrap.yaml @@ -87,6 +87,8 @@ Resources: token.actions.githubusercontent.com:aud: sts.amazonaws.com StringLike: token.actions.githubusercontent.com:sub: !Sub "repo:${GitHubRepository}:*" + ManagedPolicyArns: + - !Ref DeployPolicy # ------------------------------------------------------------------------- # Least-privilege policy for SAM deploy (CloudFormation, S3, IAM PassRole, etc.) @@ -234,12 +236,6 @@ Resources: - events:ListTargetsByRule Resource: "*" - DeployRolePolicyAttachment: - Type: AWS::IAM::RolePolicyAttachment - Properties: - RoleName: !Ref GitHubDeployRole - PolicyArn: !Ref DeployPolicy - Outputs: GitHubDeployRoleArn: Description: ARN of the role for GitHub Actions to assume (set as AWS_ROLE_TO_ASSUME). diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 8faa46e..f649199 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -58,19 +58,34 @@ Parameters: Description: > Endpoint of an existing RDS instance (e.g. mydb.xxxx.us-east-2.rds.amazonaws.com). Leave EMPTY to create a new RDS instance. When set, all VPC and RDS - resources are skipped. + resources are skipped and the deploy creates the schema and app user for you. 
Type: String Default: "" + ExistingDatabaseAdminUser: + Description: > + MySQL user that can create databases and users (e.g. master). Used only when + ExistingDatabaseHost is set; the deploy creates a dedicated app user and schema. + Type: String + Default: "" + + ExistingDatabaseAdminPassword: + Description: Password for ExistingDatabaseAdminUser. Used only when using existing host. + Type: String + NoEcho: true + Default: "" + DatabaseUser: - Description: Database username (master user if creating new RDS, or an existing user) + Description: > + Database username: master user when creating new RDS; ignored when ExistingDatabaseHost is set. Type: String + Default: "" DatabasePassword: - Description: Database password + Description: Database password (master when creating new RDS; min 8 chars). Ignored when using existing host. Type: String NoEcho: true - MinLength: 8 + Default: "" DatabaseSchema: Description: > @@ -127,6 +142,7 @@ Parameters: Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] + UseExistingDatabase: !Not [!Equals [!Ref ExistingDatabaseHost, ""]] HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] Mappings: @@ -311,6 +327,47 @@ Resources: ExcludePunctuation: true IncludeSpace: false + # --- Existing RDS: generated app password and setup Lambda --- + AppDbCredentialsSecret: + Type: AWS::SecretsManager::Secret + Condition: UseExistingDatabase + Properties: + Name: !Sub "syncbot-${Stage}-app-db-password" + Description: !Sub "SyncBot ${Stage} app DB user password (created by stack)" + GenerateSecretString: + PasswordLength: 32 + ExcludePunctuation: true + IncludeSpace: false + + DbSetupFunction: + Type: AWS::Serverless::Function + Condition: UseExistingDatabase + Properties: + CodeUri: db_setup/ + Handler: handler.handler + Runtime: python3.11 + Timeout: 60 + MemorySize: 256 + Policies: + - Version: "2012-10-17" + Statement: + - Effect: Allow + Action: secretsmanager:GetSecretValue + Resource: !Ref 
AppDbCredentialsSecret + + AppDbSetup: + Type: Custom::ExistingRDSSetup + Condition: UseExistingDatabase + DependsOn: DbSetupFunction + Properties: + ServiceToken: !GetAtt DbSetupFunction.Arn + Host: !Ref ExistingDatabaseHost + AdminUser: !Ref ExistingDatabaseAdminUser + AdminPassword: !Ref ExistingDatabaseAdminPassword + Schema: !Ref DatabaseSchema + Stage: !Ref Stage + SecretArn: !Ref AppDbCredentialsSecret + # ============================================================ # Lambda Function # ============================================================ @@ -363,8 +420,16 @@ Resources: - CreateDatabase - !GetAtt RDSInstance.Endpoint.Address - !Ref ExistingDatabaseHost - ADMIN_DATABASE_USER: !Ref DatabaseUser - ADMIN_DATABASE_PASSWORD: !Ref DatabasePassword + ADMIN_DATABASE_USER: !If + - UseExistingDatabase + - !GetAtt AppDbSetup.Username + - !Ref DatabaseUser + ADMIN_DATABASE_PASSWORD: !If + - UseExistingDatabase + - !Sub + - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" + - { SecretArn: !Ref AppDbCredentialsSecret } + - !Ref DatabasePassword ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema TOKEN_ENCRYPTION_KEY: !If - HasTokenEncryptionKeyOverride diff --git a/poetry.lock b/poetry.lock index 766bc95..ef00121 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand. 
[[package]] name = "alembic" @@ -696,6 +696,21 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = "python-dotenv" +version = "1.2.2" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a"}, + {file = "python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + [[package]] name = "requests" version = "2.32.5" @@ -731,10 +746,10 @@ files = [ ] [package.dependencies] -botocore = ">=1.12.36,<2.0a.0" +botocore = ">=1.12.36,<2.0a0" [package.extras] -crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] +crt = ["botocore[crt] (>=1.20.29,<2.0a0)"] [[package]] name = "six" @@ -892,4 +907,4 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "8fdfafb375ef7b2e690dcb9c76b155903945907a274747a9e7e8eddebe95175f" +content-hash = "a73183522f531f6f23e9c83b2d78f3a23725778358039b9e2d04693b5e6e55a1" diff --git a/pyproject.toml b/pyproject.toml index 7e30654..08f6de0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,6 +8,7 @@ readme = "README.md" [tool.poetry.dependencies] python = "^3.11" alembic = "^1.13" +python-dotenv = "^1.2.0" slack-bolt = "^1.27.0" sqlalchemy = "<2.0" pymysql = "^1.1.2" From 7ed8556319a0bbbbdb32652917836ef0f408ffcf Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 16 Mar 2026 13:21:57 -0500 Subject: [PATCH 14/45] Requiring random token encryption key for non-local deploys. 
--- .github/workflows/deploy-aws.yml | 5 +++- docs/DEPLOYMENT.md | 2 +- docs/INFRA_CONTRACT.md | 2 +- infra/aws/scripts/print-bootstrap-outputs.sh | 7 ++--- infra/gcp/scripts/print-bootstrap-outputs.sh | 7 ++--- syncbot/constants.py | 29 ++++++++++++++++---- 6 files changed, 33 insertions(+), 19 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index ae7d79e..387921a 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -1,6 +1,9 @@ # Deploy SyncBot to AWS (SAM). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. # To use GCP instead: set repository variable DEPLOY_TARGET=gcp and disable this workflow. -# Optional DR secret: TOKEN_ENCRYPTION_KEY_OVERRIDE (passes TokenEncryptionKeyOverride on deploy). +# +# Token key policy: Non-local deploys require a secure TOKEN_ENCRYPTION_KEY. The AWS app stack +# auto-generates it in Secrets Manager by default. Back up the generated key after first deploy. +# Optional DR secret TOKEN_ENCRYPTION_KEY_OVERRIDE passes TokenEncryptionKeyOverride for restore. name: Deploy (AWS) diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index a144273..77975dd 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -97,7 +97,7 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa 3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. 4. Push to `test` or `prod` to build and deploy. 
The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). -**Important:** `TOKEN_ENCRYPTION_KEY` is generated once and stored in Secrets Manager by the app stack. Back up the secret value after first deploy. If this key is lost, existing workspaces must reinstall to re-authorize bot tokens. +**Important (token encryption key):** Non-local deploys require a secure `TOKEN_ENCRYPTION_KEY`. The AWS app stack **auto-generates** it in Secrets Manager by default. You must **back up the generated key** after first deploy; if it is lost, existing workspaces must reinstall to re-authorize bot tokens. For local development you may set the key manually in `.env` or leave it unset. #### Using an existing RDS host (AWS) diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index c346f15..42eb756 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -33,7 +33,7 @@ The application reads configuration from environment variables. Providers must i | `ENV_SLACK_CLIENT_ID` | Slack OAuth client ID. | | `ENV_SLACK_CLIENT_SECRET` | Slack OAuth client secret. | | `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes (see `.env.example`). | -| `TOKEN_ENCRYPTION_KEY` | Passphrase for bot-token encryption at rest. Should be generated once, stored in secret manager, and persisted/backed up. | +| `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. 
| ### Optional diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh index c712505..78196b7 100755 --- a/infra/aws/scripts/print-bootstrap-outputs.sh +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -30,8 +30,5 @@ echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRol echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}')" echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" echo "" -echo "WARNING: TOKEN_ENCRYPTION_KEY is generated once in app-stack Secrets Manager." -echo "Back up this secret value after first app deploy." -echo "If the key is lost, existing workspaces must reinstall the app to re-authorize tokens." -echo "Expected secret name after app deploy: syncbot--token-encryption-key" -echo "Disaster recovery: pass the old key as SAM parameter TokenEncryptionKeyOverride=." +echo "Next: deploy the app stack (sam deploy) and set the remaining GitHub vars/secrets." +echo "TOKEN_ENCRYPTION_KEY is created by the app stack on first deploy — back it up then (see docs/DEPLOYMENT.md)." diff --git a/infra/gcp/scripts/print-bootstrap-outputs.sh b/infra/gcp/scripts/print-bootstrap-outputs.sh index f923665..22f6515 100755 --- a/infra/gcp/scripts/print-bootstrap-outputs.sh +++ b/infra/gcp/scripts/print-bootstrap-outputs.sh @@ -34,8 +34,5 @@ echo "Service URL = $(terraform output -raw service_url 2>/dev/null || e echo "" echo "For deploy-gcp.yml also set: GCP_WORKLOAD_IDENTITY_PROVIDER (after configuring WIF for GitHub)." echo "" -echo "WARNING: TOKEN_ENCRYPTION_KEY is generated once and stored in Secret Manager." -echo "Back up the secret value (or ensure durable secret backup/replication)." -echo "If this key is lost, existing workspaces must reinstall the app to re-authorize tokens." 
-echo "Secret name: $(terraform output -raw token_encryption_secret_name 2>/dev/null || echo '')" -echo "Disaster recovery: re-apply with -var='token_encryption_key_override=' to preserve decryption." +echo "TOKEN_ENCRYPTION_KEY is generated by this Terraform apply and stored in Secret Manager." +echo "Back it up after apply (see docs/DEPLOYMENT.md). Secret: $(terraform output -raw token_encryption_secret_name 2>/dev/null || echo '')" diff --git a/syncbot/constants.py b/syncbot/constants.py index 73e55b2..9f1e66f 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -155,10 +155,23 @@ def get_required_db_vars() -> list: ] +# Minimum length for TOKEN_ENCRYPTION_KEY in production (reject weak/placeholder values). +_TOKEN_ENCRYPTION_KEY_MIN_LEN = 16 +_TOKEN_ENCRYPTION_KEY_PLACEHOLDERS = frozenset({"123", "changeme", "secret", "password"}) + + def _encryption_active() -> bool: - """Return True if bot-token encryption is configured with a real key.""" - key = os.environ.get(TOKEN_ENCRYPTION_KEY, "") - return bool(key) and key != "123" + """Return True if bot-token encryption is configured with a strong key. + + In non-local environments the key must be set, at least _TOKEN_ENCRYPTION_KEY_MIN_LEN + characters, and not a known placeholder. Local dev can use any value or leave unset. + """ + key = (os.environ.get(TOKEN_ENCRYPTION_KEY) or "").strip() + if not key or len(key) < _TOKEN_ENCRYPTION_KEY_MIN_LEN: + return False + if key.lower() in _TOKEN_ENCRYPTION_KEY_PLACEHOLDERS: + return False + return True def validate_config() -> None: @@ -183,7 +196,11 @@ def validate_config() -> None: raise OSError(msg) if not LOCAL_DEVELOPMENT and not _encryption_active(): - _logger.critical( - "Bot-token encryption is DISABLED in production. " - "Set TOKEN_ENCRYPTION_KEY to a strong passphrase to encrypt tokens at rest." + msg = ( + "TOKEN_ENCRYPTION_KEY is required in production and must be a secure, random value " + f"(at least {_TOKEN_ENCRYPTION_KEY_MIN_LEN} characters). 
" + "Use your provider's secret manager; the AWS template auto-generates it. " + "Back up the key after first deploy. In local dev you may set it manually or leave unset." ) + _logger.critical(msg) + raise OSError(msg) From 878676ec544719f7bf1fd13438942b9878aac0d0 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 16 Mar 2026 19:04:32 -0500 Subject: [PATCH 15/45] Renamed DB env consts. --- .devcontainer/devcontainer.json | 6 ++--- .devcontainer/docker-compose.dev.yml | 6 ++--- .env.example | 6 ++--- .github/workflows/deploy-aws.yml | 2 ++ README.md | 6 ++--- docker-compose.yml | 6 ++--- docs/DEPLOYMENT.md | 9 +++++--- docs/INFRA_CONTRACT.md | 8 +++---- infra/aws/template.yaml | 34 ++++++++++++++++++++-------- infra/gcp/main.tf | 6 ++--- infra/gcp/variables.tf | 2 +- syncbot/constants.py | 12 +++++----- syncbot/db/__init__.py | 10 ++++---- tests/test_channel_sync_handlers.py | 6 ++--- tests/test_db.py | 14 ++++++------ tests/test_export_import_handlers.py | 6 ++--- tests/test_groups_handlers.py | 6 ++--- tests/test_handlers.py | 6 ++--- tests/test_helpers.py | 6 ++--- tests/test_oauth.py | 6 ++--- 20 files changed, 92 insertions(+), 71 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 3cce8bb..583c158 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -35,8 +35,8 @@ "PYTHONPATH": "/app/syncbot", "LOCAL_DEVELOPMENT": "true", "DATABASE_HOST": "db", - "ADMIN_DATABASE_USER": "root", - "ADMIN_DATABASE_PASSWORD": "rootpass", - "ADMIN_DATABASE_SCHEMA": "syncbot" + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "rootpass", + "DATABASE_SCHEMA": "syncbot" } } diff --git a/.devcontainer/docker-compose.dev.yml b/.devcontainer/docker-compose.dev.yml index c3cbd6f..ba23142 100644 --- a/.devcontainer/docker-compose.dev.yml +++ b/.devcontainer/docker-compose.dev.yml @@ -31,9 +31,9 @@ services: DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} DATABASE_URL: ${DATABASE_URL:-} DATABASE_HOST: 
${DATABASE_HOST:-db} - ADMIN_DATABASE_USER: ${ADMIN_DATABASE_USER:-root} - ADMIN_DATABASE_PASSWORD: ${ADMIN_DATABASE_PASSWORD:-rootpass} - ADMIN_DATABASE_SCHEMA: ${ADMIN_DATABASE_SCHEMA:-syncbot} + DATABASE_USER: ${DATABASE_USER:-root} + DATABASE_PASSWORD: ${DATABASE_PASSWORD:-rootpass} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:-syncbot} DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} volumes: diff --git a/.env.example b/.env.example index 71eb12c..2a1498a 100644 --- a/.env.example +++ b/.env.example @@ -13,9 +13,9 @@ # Option A — MySQL (default): legacy vars or DATABASE_URL DATABASE_BACKEND=mysql DATABASE_HOST=127.0.0.1 -ADMIN_DATABASE_USER=root -ADMIN_DATABASE_PASSWORD=rootpass -ADMIN_DATABASE_SCHEMA=syncbot +DATABASE_USER=root +DATABASE_PASSWORD=rootpass +DATABASE_SCHEMA=syncbot # Optional MySQL TLS controls (provider-dependent) # DATABASE_TLS_ENABLED=true # DATABASE_SSL_CA_PATH=/etc/pki/tls/certs/ca-bundle.crt diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 387921a..b99b283 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -89,6 +89,7 @@ jobs: ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ DatabaseUser=${{ vars.DATABASE_USER }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" @@ -139,6 +140,7 @@ jobs: ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ DatabaseUser=${{ vars.DATABASE_USER }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ DatabasePassword=${{ 
secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" diff --git a/README.md b/README.md index 74e93b9..130bffe 100644 --- a/README.md +++ b/README.md @@ -229,9 +229,9 @@ See [`.env.example`](.env.example) for all available options with descriptions. | Variable | Description | |----------|-------------| | `DATABASE_HOST` | MySQL hostname | -| `ADMIN_DATABASE_USER` | MySQL username | -| `ADMIN_DATABASE_PASSWORD` | MySQL password | -| `ADMIN_DATABASE_SCHEMA` | MySQL database name | +| `DATABASE_USER` | MySQL username | +| `DATABASE_PASSWORD` | MySQL password | +| `DATABASE_SCHEMA` | MySQL database name | ### Required in Production (Lambda) diff --git a/docker-compose.yml b/docker-compose.yml index 50294f5..7a2d1b3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -27,9 +27,9 @@ services: DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} DATABASE_URL: ${DATABASE_URL:-} DATABASE_HOST: ${DATABASE_HOST:-db} - ADMIN_DATABASE_USER: ${ADMIN_DATABASE_USER:-root} - ADMIN_DATABASE_PASSWORD: ${ADMIN_DATABASE_PASSWORD:-rootpass} - ADMIN_DATABASE_SCHEMA: ${ADMIN_DATABASE_SCHEMA:-syncbot} + DATABASE_USER: ${DATABASE_USER:-root} + DATABASE_PASSWORD: ${DATABASE_PASSWORD:-rootpass} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:-syncbot} DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} # Slack — replace with your values or use a .env file diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 77975dd..3212b40 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -40,7 +40,7 @@ See [Swapping providers](#swapping-providers) for changing providers in a fork. The app supports **MySQL** (default) or **SQLite**. See [INFRA_CONTRACT.md](INFRA_CONTRACT.md) for required variables per backend. **Pre-release:** DB flow assumes **fresh installs only**; schema is created at startup via Alembic. -- **MySQL:** Use for production and when using AWS/GCP templates (RDS, Cloud SQL). 
Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or `DATABASE_HOST` + `ADMIN_DATABASE_*`. +- **MySQL:** Use for production and when using AWS/GCP templates (RDS, Cloud SQL). Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or `DATABASE_HOST` + `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. - **SQLite:** Use for forks or local runs where you prefer no DB server. Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/syncbot.db`. Single-writer; ensure backups and file durability. AWS/GCP reference templates assume MySQL; for SQLite you deploy the app (e.g. container or Lambda with a writable volume) and set the env vars only. --- @@ -94,7 +94,7 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa Use the bootstrap **DeploymentBucketName**. Set parameters (Stage, DB, Slack, etc.) when prompted. -3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. +3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `SLACK_CLIENT_ID` (Slack app Client ID from Basic Information → App Credentials), `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. 
Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. 4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). **Important (token encryption key):** Non-local deploys require a secure `TOKEN_ENCRYPTION_KEY`. The AWS app stack **auto-generates** it in Secrets Manager by default. You must **back up the generated key** after first deploy; if it is lost, existing workspaces must reinstall to re-authorize bot tokens. For local development you may set the key manually in `.env` or leave it unset. @@ -118,14 +118,17 @@ To **reuse only the DB host** and have the deploy create the schema and a dedica For production, consider a VPC-enabled Lambda and private RDS; that would require template changes. 4. **First deploy (local `sam deploy`):** - Pass the **existing-host** parameters (admin user/password; do **not** pass `DatabaseUser`/`DatabasePassword` for the app — they are created for you): + Pass the **existing-host** parameters (admin user/password). When using **guided** mode, SAM will still prompt for **DatabaseUser** and **DatabasePassword**; the stack ignores these when using an existing host (app user/password are auto-generated). If the **DatabasePassword** prompt repeats in a loop (SAM often rejects empty for password fields), type any placeholder (e.g. `ignored`) and continue — it is never used. To avoid interactive prompts, use **parameter-overrides** and set `DatabaseUser=` and `DatabasePassword=ignored` (or any value) for existing-host deploys: ```bash sam deploy --guided ... 
\ --parameter-overrides \ ExistingDatabaseHost=your-db.xxxx.us-east-2.rds.amazonaws.com \ ExistingDatabaseAdminUser=admin \ ExistingDatabaseAdminPassword=your_master_password \ + DatabaseUser= \ + DatabasePassword=ignored \ DatabaseSchema=syncbot_test \ + SlackClientID=your_slack_app_client_id \ SlackSigningSecret=... \ SlackClientSecret=... ``` diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 42eb756..fb7b252 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -15,15 +15,15 @@ The application reads configuration from environment variables. Providers must i | `DATABASE_BACKEND` | `mysql` (default) or `sqlite`. | | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides legacy MySQL vars. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For MySQL, optional (if unset, legacy vars below are used). | | `DATABASE_HOST` | MySQL hostname (IP or FQDN). Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `ADMIN_DATABASE_USER` | MySQL username. Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `ADMIN_DATABASE_PASSWORD` | MySQL password. Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `ADMIN_DATABASE_SCHEMA` | MySQL database/schema name (e.g. `syncbot`, `syncbot_prod`). Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `DATABASE_USER` | MySQL username. Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `DATABASE_PASSWORD` | MySQL password. Required when backend is `mysql` and `DATABASE_URL` is unset. | +| `DATABASE_SCHEMA` | MySQL database/schema name (e.g. `syncbot`, `syncbot_prod`). Required when backend is `mysql` and `DATABASE_URL` is unset. | | `DATABASE_TLS_ENABLED` | Optional MySQL TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | | `DATABASE_SSL_CA_PATH` | Optional CA bundle path used when TLS is enabled (default `/etc/pki/tls/certs/ca-bundle.crt`). 
| **SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. Caveats: single-writer behavior, file durability, and backup expectations are your responsibility. For production at scale, prefer MySQL. -**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or the four legacy vars above. +**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or the four legacy vars above. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time schema setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. ### Required in production (non–local) diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index f649199..279059f 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -41,6 +41,13 @@ Parameters: NoEcho: true Default: "123" + SlackClientID: + Description: > + Slack OAuth app Client ID (Basic Information → App Credentials). + Required for your Slack app; use the ID from the app you created for this deploy. + Type: String + Default: "" + SlackClientSecret: Description: Slack OAuth client secret Type: String @@ -77,12 +84,15 @@ Parameters: DatabaseUser: Description: > - Database username: master user when creating new RDS; ignored when ExistingDatabaseHost is set. + Database username for new RDS only. When using an existing database + (ExistingDatabaseHost set), leave BLANK — the deploy creates syncbot_ and uses it. Type: String Default: "" DatabasePassword: - Description: Database password (master when creating new RDS; min 8 chars). Ignored when using existing host. + Description: > + Master password for new RDS only (min 8 chars). When using an existing database + (ExistingDatabaseHost set), leave BLANK — the app user password is auto-generated. 
Type: String NoEcho: true Default: "" @@ -144,6 +154,7 @@ Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] UseExistingDatabase: !Not [!Equals [!Ref ExistingDatabaseHost, ""]] HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] + HasSlackClientID: !Not [!Equals [!Ref SlackClientID, ""]] Mappings: StagesMap: @@ -346,6 +357,8 @@ Resources: CodeUri: db_setup/ Handler: handler.handler Runtime: python3.11 + Architectures: + - x86_64 Timeout: 60 MemorySize: 256 Policies: @@ -412,25 +425,28 @@ Resources: SLACK_SIGNING_SECRET: !Ref SlackSigningSecret ENV_SLACK_CLIENT_SECRET: !Ref SlackClientSecret ENV_SLACK_SCOPES: !Ref SlackOauthScopes - ENV_SLACK_CLIENT_ID: !FindInMap - - StagesMap - - !Ref Stage - - SlackClientID + ENV_SLACK_CLIENT_ID: !If + - HasSlackClientID + - !Ref SlackClientID + - !FindInMap + - StagesMap + - !Ref Stage + - SlackClientID DATABASE_HOST: !If - CreateDatabase - !GetAtt RDSInstance.Endpoint.Address - !Ref ExistingDatabaseHost - ADMIN_DATABASE_USER: !If + DATABASE_USER: !If - UseExistingDatabase - !GetAtt AppDbSetup.Username - !Ref DatabaseUser - ADMIN_DATABASE_PASSWORD: !If + DATABASE_PASSWORD: !If - UseExistingDatabase - !Sub - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" - { SecretArn: !Ref AppDbCredentialsSecret } - !Ref DatabasePassword - ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema + DATABASE_SCHEMA: !Ref DatabaseSchema TOKEN_ENCRYPTION_KEY: !If - HasTokenEncryptionKeyOverride - !Ref TokenEncryptionKeyOverride diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 25992dc..19c1010 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -37,7 +37,7 @@ locals { "ENV_SLACK_CLIENT_SECRET" = var.secret_slack_client_secret "ENV_SLACK_SCOPES" = var.secret_slack_scopes "TOKEN_ENCRYPTION_KEY" = var.secret_token_encryption_key - "ADMIN_DATABASE_PASSWORD" = var.secret_db_password + "DATABASE_PASSWORD" = var.secret_db_password } } @@ -264,11 +264,11 @@ resource "google_cloud_run_v2_service" 
"syncbot" { value = local.db_host } env { - name = "ADMIN_DATABASE_USER" + name = "DATABASE_USER" value = local.db_user } env { - name = "ADMIN_DATABASE_SCHEMA" + name = "DATABASE_SCHEMA" value = local.db_schema } diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 678323e..a71daef 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -139,5 +139,5 @@ variable "token_encryption_key_override" { variable "secret_db_password" { type = string default = "syncbot-db-password" - description = "Secret Manager secret ID for ADMIN_DATABASE_PASSWORD (used when use_existing_database = true or with Cloud SQL)" + description = "Secret Manager secret ID for DATABASE_PASSWORD (used when use_existing_database = true or with Cloud SQL)" } diff --git a/syncbot/constants.py b/syncbot/constants.py index 9f1e66f..ec0bc89 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -34,9 +34,9 @@ # Legacy MySQL-only vars (used when DATABASE_URL unset and backend is mysql) DATABASE_HOST = "DATABASE_HOST" -ADMIN_DATABASE_USER = "ADMIN_DATABASE_USER" -ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" -ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" +DATABASE_USER = "DATABASE_USER" +DATABASE_PASSWORD = "DATABASE_PASSWORD" +DATABASE_SCHEMA = "DATABASE_SCHEMA" DATABASE_SSL_CA_PATH = "DATABASE_SSL_CA_PATH" DATABASE_TLS_ENABLED = "DATABASE_TLS_ENABLED" @@ -136,9 +136,9 @@ def get_required_db_vars() -> list: return [] # URL is enough return [ DATABASE_HOST, - ADMIN_DATABASE_USER, - ADMIN_DATABASE_PASSWORD, - ADMIN_DATABASE_SCHEMA, + DATABASE_USER, + DATABASE_PASSWORD, + DATABASE_SCHEMA, ] diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index ee49877..01264c4 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -50,9 +50,9 @@ class DatabaseField: def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: """Build MySQL URL and connect_args from legacy env vars.""" host = os.environ[constants.DATABASE_HOST] - user = 
quote_plus(os.environ[constants.ADMIN_DATABASE_USER]) - passwd = quote_plus(os.environ[constants.ADMIN_DATABASE_PASSWORD]) - schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") + user = quote_plus(os.environ[constants.DATABASE_USER]) + passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") path = f"/{schema}" if include_schema else "" db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306{path}?charset=utf8mb4" connect_args: dict = {} @@ -103,7 +103,7 @@ def _ensure_database_exists() -> None: return if os.environ.get(constants.DATABASE_URL): return # URL already points at a database - schema = os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot") + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") url_no_db, connect_args = _build_mysql_url(include_schema=False) engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) try: @@ -217,7 +217,7 @@ def get_engine(echo: bool = False, schema: str = None): global GLOBAL_ENGINE, GLOBAL_SCHEMA backend = constants.get_database_backend() - target_schema = (schema or os.environ.get(constants.ADMIN_DATABASE_SCHEMA, "syncbot")) if backend == "mysql" else "" + target_schema = (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) if backend == "mysql" else "" cache_key = target_schema or backend if cache_key == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None: diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py index e16f9dc..5021b40 100644 --- a/tests/test_channel_sync_handlers.py +++ b/tests/test_channel_sync_handlers.py @@ -5,9 +5,9 @@ from unittest.mock import MagicMock, patch os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") 
+os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from handlers.channel_sync import ( # noqa: E402 diff --git a/tests/test_db.py b/tests/test_db.py index 11ac10d..b96a25a 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -6,9 +6,9 @@ import pytest os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from sqlalchemy import inspect @@ -81,9 +81,9 @@ class TestEngineConfig: { "DATABASE_BACKEND": "mysql", "DATABASE_HOST": "localhost", - "ADMIN_DATABASE_USER": "root", - "ADMIN_DATABASE_PASSWORD": "test", - "ADMIN_DATABASE_SCHEMA": "syncbot", + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "test", + "DATABASE_SCHEMA": "syncbot", }, clear=False, ) @@ -179,7 +179,7 @@ def test_get_required_db_vars_mysql_without_url(self): required = get_required_db_vars() assert "DATABASE_HOST" in required - assert "ADMIN_DATABASE_USER" in required + assert "DATABASE_USER" in required def test_get_required_db_vars_sqlite(self): with patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False): diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py index c7828ba..ac39edd 100644 --- a/tests/test_export_import_handlers.py +++ b/tests/test_export_import_handlers.py @@ -4,9 +4,9 @@ from unittest.mock import MagicMock, patch os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") 
+os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from handlers.export_import import handle_backup_restore_submit # noqa: E402 diff --git a/tests/test_groups_handlers.py b/tests/test_groups_handlers.py index 1864b67..e02e289 100644 --- a/tests/test_groups_handlers.py +++ b/tests/test_groups_handlers.py @@ -5,9 +5,9 @@ from unittest.mock import MagicMock, patch os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from handlers.groups import handle_join_group_submit # noqa: E402 diff --git a/tests/test_handlers.py b/tests/test_handlers.py index 4a1109a..5fa5a3f 100644 --- a/tests/test_handlers.py +++ b/tests/test_handlers.py @@ -4,9 +4,9 @@ from unittest.mock import MagicMock, patch os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from handlers import ( diff --git a/tests/test_helpers.py b/tests/test_helpers.py index fe4d995..e268ccf 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -8,9 +8,9 @@ # Ensure minimal env vars are set before importing app code os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", 
"root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") # Placeholder only; never a real token (avoids secret scanners) os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") diff --git a/tests/test_oauth.py b/tests/test_oauth.py index c322fd5..ab62700 100644 --- a/tests/test_oauth.py +++ b/tests/test_oauth.py @@ -4,9 +4,9 @@ from unittest.mock import patch os.environ.setdefault("DATABASE_HOST", "localhost") -os.environ.setdefault("ADMIN_DATABASE_USER", "root") -os.environ.setdefault("ADMIN_DATABASE_PASSWORD", "test") -os.environ.setdefault("ADMIN_DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from helpers.oauth import get_oauth_flow From 4dde68f6f936da6351e67c8d37de033dd6770eb7 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 16 Mar 2026 20:17:54 -0500 Subject: [PATCH 16/45] Update to Python 3.12 for Amazon Linux 2023. Updated to SQLAlchemy 2.x to avoid deprecation. 
--- .github/dependabot.yml | 25 +++++ .github/workflows/deploy-aws.yml | 8 +- Dockerfile | 13 ++- README.md | 9 +- docs/DEPLOYMENT.md | 10 ++ docs/IMPROVEMENTS.md | 20 +++- docs/INFRA_CONTRACT.md | 15 +++ infra/aws/db_setup/Makefile | 6 ++ infra/aws/db_setup/handler.py | 14 ++- infra/aws/db_setup/requirements.txt | 2 +- infra/aws/template.yaml | 6 +- poetry.lock | 156 ++++++++++++++++------------ pyproject.toml | 6 +- syncbot/constants.py | 2 +- syncbot/db/__init__.py | 2 +- syncbot/db/schemas.py | 6 +- syncbot/requirements.txt | 28 ++--- tests/test_db.py | 2 + 18 files changed, 222 insertions(+), 108 deletions(-) create mode 100644 .github/dependabot.yml create mode 100644 infra/aws/db_setup/Makefile diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..b3283b5 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,25 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "pip" + directory: "/infra/aws/db_setup" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index b99b283..7e5f832 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: - python-version: '3.11' + python-version: '3.12' - uses: aws-actions/setup-sam@v2 with: use-installer: true @@ -37,6 +37,12 @@ jobs: - run: sam build -t infra/aws/template.yaml + - name: Security audit Python dependencies + run: | + python -m pip install --upgrade pip pip-audit + pip-audit -r syncbot/requirements.txt + pip-audit -r 
infra/aws/db_setup/requirements.txt + - name: Publish artifact uses: actions/upload-artifact@v4 with: diff --git a/Dockerfile b/Dockerfile index 7c06ab1..7f81e73 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.11-slim +FROM python:3.12-slim WORKDIR /app @@ -10,12 +10,11 @@ RUN apt-get update && \ default-libmysqlclient-dev \ && rm -rf /var/lib/apt/lists/* -# Export and install runtime Python dependencies from Poetry lockfile. -COPY pyproject.toml poetry.lock ./ -RUN python -m pip install --no-cache-dir --upgrade pip poetry poetry-plugin-export && \ - poetry export --only main --format requirements.txt --without-hashes --output requirements.txt && \ - pip install --no-cache-dir -r requirements.txt -RUN pip install --no-cache-dir boto3 +# Install runtime dependencies from pinned requirements. +COPY syncbot/requirements.txt /app/requirements.txt +RUN python -m pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /app/requirements.txt && \ + pip install --no-cache-dir boto3 # Copy application code COPY syncbot/ ./syncbot/ diff --git a/README.md b/README.md index 130bffe..12a570c 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ docker compose down -v # stop + delete DB vo ### Option C: Native Python -**Prerequisites:** Python 3.11+, Poetry 1.6+, Docker *(optional, for MySQL)* +**Prerequisites:** Python 3.12+, Poetry 1.6+ (2.x recommended), Docker *(optional, for MySQL)* ```bash git clone https://github.com/GITHUB_ORG_NAME/syncbot.git @@ -198,6 +198,13 @@ cd syncbot poetry install --with dev ``` +If you change dependencies in `pyproject.toml`, refresh lock and deployment pins: + +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +``` + Start a local MySQL instance: ```bash diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 3212b40..0745908 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -7,6 +7,8 @@ This guide covers 
deploying SyncBot on **AWS** (default) or **GCP**, with two pa The app code and [infrastructure contract](INFRA_CONTRACT.md) are provider-agnostic; only the infrastructure in `infra//` and the CI workflow differ. +**Runtime baseline:** Python 3.12. Keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda runtimes, and CI Python version aligned. + --- ## Fork-First Model (Recommended) @@ -96,6 +98,14 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa 3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `SLACK_CLIENT_ID` (Slack app Client ID from Basic Information → App Credentials), `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. 4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). + - The AWS workflow runs `pip-audit` against `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`, so dependency pins should be kept current. + +When dependency constraints change in `pyproject.toml`, refresh both lock and deployment requirements: + +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +``` **Important (token encryption key):** Non-local deploys require a secure `TOKEN_ENCRYPTION_KEY`. The AWS app stack **auto-generates** it in Secrets Manager by default. You must **back up the generated key** after first deploy; if it is lost, existing workspaces must reinstall to re-authorize bot tokens. 
For local development you may set the key manually in `.env` or leave it unset. diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md index 3374d9a..71bb16a 100644 --- a/docs/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -2,6 +2,9 @@ This document outlines the improvements made to the SyncBot application and additional recommendations for future enhancements. +> Historical changelog note: this file tracks work over time and may reference superseded implementation details. +> For current deployment/runtime requirements, use `docs/INFRA_CONTRACT.md` and `docs/DEPLOYMENT.md` as the source of truth. + ## ✅ Completed Improvements ### 1. Database Management Fixes @@ -441,13 +444,24 @@ This document outlines the improvements made to the SyncBot application and addi - **HEIC and Pillow removed** — HEIC-to-PNG conversion and `upload_photos` (S3) were removed; direct upload is the only media path. Dropped `pillow` and `pillow-heif` from dependencies. - **Template and docs** — `infra/aws/template.yaml` no longer creates OAuth or image buckets; README, DEPLOYMENT, ARCHITECTURE, USER_GUIDE, `.env.example`, and IMPROVEMENTS updated to describe MySQL OAuth and artifact-bucket-only S3. +### 48. Infra Contract + Pre-Release DB Abstraction (Completed) +- **Contract rename for clarity** — renamed deployment contract docs to **Infrastructure Contract** (`docs/INFRA_CONTRACT.md`) and updated all references across docs, workflows, and infra comments. +- **Backend-neutral DB runtime contract** — standardized on `DATABASE_BACKEND`, `DATABASE_URL`, and `DATABASE_*` runtime names (`DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`) across app, tests, infra, and docs. +- **SQLite-capable runtime path** — app startup and reset flows are Alembic-driven and dialect-aware (MySQL + SQLite), with fresh-install assumptions for this pre-release. 
+- **Token key naming cleanup** — renamed `PASSWORD_ENCRYPT_KEY` to `TOKEN_ENCRYPTION_KEY` everywhere (code, tests, infra, workflows, and docs). +- **Generate-once token key** — cloud deploy paths now generate `TOKEN_ENCRYPTION_KEY` once and persist it in provider secret manager by default. +- **Disaster recovery override** — added explicit key reuse overrides for rebuild scenarios: + - AWS SAM parameter: `TokenEncryptionKeyOverride` + - GCP Terraform variable: `token_encryption_key_override` +- **Admin/operator warning surface** — deploy helper scripts and deployment docs now explicitly warn that losing the token key requires workspace reinstall/re-authorization. + ## Remaining Recommendations ### Low Priority 1. **Dependencies** - - Update SQLAlchemy to 2.0+ (currently pinned to <2.0) - - Review and update other dependencies + - Keep dependency pins current with regular lock refreshes and security audits + - Review major-version upgrades for Slack SDK/Bolt and provider tooling on a planned cadence 2. **Database Migrations** - Startup now bootstraps schema via Alembic (`alembic upgrade head`) for fresh installs. 
@@ -465,7 +479,7 @@ This document outlines the improvements made to the SyncBot application and addi - Database layer benefits from connection pooling, automatic retry with safe disposal, and `SELECT COUNT(*)` for counting - All Slack API calls have rate-limit handling with exponential backoff - Error isolation in sync loops ensures partial failures don't cascade -- 60 unit tests cover core helper functions, encryption, caching, event parsing, bot filtering, invite codes, and sync creation +- The pytest suite covers core helper functions, encryption, caching, event parsing, bot filtering, invite codes, DB behavior, and sync creation - Structured JSON logging with correlation IDs enables fast CloudWatch Logs Insights queries - Pre-commit hooks enforce consistent code style on every commit - Admin/owner authorization enforced on all configuration actions with defense-in-depth diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index fb7b252..569df4d 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -8,6 +8,21 @@ This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) m The application reads configuration from environment variables. Providers must inject these at runtime (e.g. Lambda env, Cloud Run env, or a compatible secret/config layer). +## Toolchain Baseline + +- Runtime baseline: **Python 3.12**. 
+- Keep runtime/tooling aligned across: + - Lambda/Cloud Run runtime configuration + - CI Python version + - `pyproject.toml` Python constraint + - `syncbot/requirements.txt` deployment pins +- When dependency constraints change in `pyproject.toml`, refresh both lock and deployment requirements: + +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +``` + ### Database (backend-agnostic) | Variable | Description | diff --git a/infra/aws/db_setup/Makefile b/infra/aws/db_setup/Makefile new file mode 100644 index 0000000..ca40881 --- /dev/null +++ b/infra/aws/db_setup/Makefile @@ -0,0 +1,6 @@ +# SAM build: copy handler and install dependencies so pymysql is in the deployment package. +build-DbSetupFunction: + cp handler.py $(ARTIFACTS_DIR)/ + cp requirements.txt $(ARTIFACTS_DIR)/ + python -m pip install -r requirements.txt -t $(ARTIFACTS_DIR) --quiet + rm -rf $(ARTIFACTS_DIR)/bin diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index 09935fe..3fe3142 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -29,13 +29,21 @@ def send(event, context, status, data=None, reason=None, physical_resource_id=No event["ResponseURL"], data=body, method="PUT", - headers={"Content-Type": ""}, + headers={"Content-Type": "application/json"}, ) - with urllib.request.urlopen(req) as f: + with urllib.request.urlopen(req, timeout=30) as f: f.read() def handler(event, context): + try: + return _handler_impl(event, context) + except Exception as e: + send(event, context, "FAILED", reason=f"Unhandled error: {e}") + raise + + +def _handler_impl(event, context): request_type = event.get("RequestType", "Create") props = event.get("ResourceProperties", {}) host = props.get("Host", "").strip() @@ -96,6 +104,7 @@ def setup_database( app_username: str, app_password: str, ) -> None: + # Fail fast if RDS is unreachable (e.g. 
not publicly accessible or SG blocks Lambda) conn = pymysql.connect( host=host, user=admin_user, @@ -103,6 +112,7 @@ def setup_database( port=3306, charset="utf8mb4", cursorclass=DictCursor, + connect_timeout=15, ) try: with conn.cursor() as cur: diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt index 76db31a..75650ed 100644 --- a/infra/aws/db_setup/requirements.txt +++ b/infra/aws/db_setup/requirements.txt @@ -1 +1 @@ -pymysql>=1.1.0 +pymysql==1.1.2 diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 279059f..95b58ef 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -353,10 +353,12 @@ Resources: DbSetupFunction: Type: AWS::Serverless::Function Condition: UseExistingDatabase + Metadata: + BuildMethod: makefile Properties: CodeUri: db_setup/ Handler: handler.handler - Runtime: python3.11 + Runtime: python3.12 Architectures: - x86_64 Timeout: 60 @@ -390,7 +392,7 @@ Resources: Properties: CodeUri: ../../syncbot/ Handler: app.handler - Runtime: python3.11 + Runtime: python3.12 Architectures: - x86_64 Timeout: 30 diff --git a/poetry.lock b/poetry.lock index ef00121..47422e1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.3.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. 
[[package]] name = "alembic" @@ -746,10 +746,10 @@ files = [ ] [package.dependencies] -botocore = ">=1.12.36,<2.0a0" +botocore = ">=1.12.36,<2.0a.0" [package.extras] -crt = ["botocore[crt] (>=1.20.29,<2.0a0)"] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] [[package]] name = "six" @@ -795,85 +795,105 @@ optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "b [[package]] name = "sqlalchemy" -version = "1.4.49" +version = "2.0.48" description = "Database Abstraction Library" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.7" groups = ["main"] files = [ - {file = "SQLAlchemy-1.4.49-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e126cf98b7fd38f1e33c64484406b78e937b1a280e078ef558b95bf5b6895f6"}, - {file = "SQLAlchemy-1.4.49-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03db81b89fe7ef3857b4a00b63dedd632d6183d4ea5a31c5d8a92e000a41fc71"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:95b9df9afd680b7a3b13b38adf6e3a38995da5e162cc7524ef08e3be4e5ed3e1"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a63e43bf3f668c11bb0444ce6e809c1227b8f067ca1068898f3008a273f52b09"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca46de16650d143a928d10842939dab208e8d8c3a9a8757600cae9b7c579c5cd"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f835c050ebaa4e48b18403bed2c0fda986525896efd76c245bdd4db995e51a4c"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c21b172dfb22e0db303ff6419451f0cac891d2e911bb9fbf8003d717f1bcf91"}, - {file = 
"SQLAlchemy-1.4.49-cp310-cp310-win32.whl", hash = "sha256:5fb1ebdfc8373b5a291485757bd6431de8d7ed42c27439f543c81f6c8febd729"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-win_amd64.whl", hash = "sha256:f8a65990c9c490f4651b5c02abccc9f113a7f56fa482031ac8cb88b70bc8ccaa"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8923dfdf24d5aa8a3adb59723f54118dd4fe62cf59ed0d0d65d940579c1170a4"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9ab2c507a7a439f13ca4499db6d3f50423d1d65dc9b5ed897e70941d9e135b0"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5debe7d49b8acf1f3035317e63d9ec8d5e4d904c6e75a2a9246a119f5f2fdf3d"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-win32.whl", hash = "sha256:82b08e82da3756765c2e75f327b9bf6b0f043c9c3925fb95fb51e1567fa4ee87"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-win_amd64.whl", hash = "sha256:171e04eeb5d1c0d96a544caf982621a1711d078dbc5c96f11d6469169bd003f1"}, - {file = "SQLAlchemy-1.4.49-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f23755c384c2969ca2f7667a83f7c5648fcf8b62a3f2bbd883d805454964a800"}, - {file = "SQLAlchemy-1.4.49-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8396e896e08e37032e87e7fbf4a15f431aa878c286dc7f79e616c2feacdb366c"}, - {file = "SQLAlchemy-1.4.49-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66da9627cfcc43bbdebd47bfe0145bb662041472393c03b7802253993b6b7c90"}, - {file = "SQLAlchemy-1.4.49-cp312-cp312-win32.whl", hash = "sha256:9a06e046ffeb8a484279e54bda0a5abfd9675f594a2e38ef3133d7e4d75b6214"}, - {file = "SQLAlchemy-1.4.49-cp312-cp312-win_amd64.whl", hash = "sha256:7cf8b90ad84ad3a45098b1c9f56f2b161601e4670827d6b892ea0e884569bd1d"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-macosx_10_14_x86_64.whl", hash = 
"sha256:36e58f8c4fe43984384e3fbe6341ac99b6b4e083de2fe838f0fdb91cebe9e9cb"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b31e67ff419013f99ad6f8fc73ee19ea31585e1e9fe773744c0f3ce58c039c30"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc22807a7e161c0d8f3da34018ab7c97ef6223578fcdd99b1d3e7ed1100a5db"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c14b29d9e1529f99efd550cd04dbb6db6ba5d690abb96d52de2bff4ed518bc95"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c40f3470e084d31247aea228aa1c39bbc0904c2b9ccbf5d3cfa2ea2dac06f26d"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-win32.whl", hash = "sha256:706bfa02157b97c136547c406f263e4c6274a7b061b3eb9742915dd774bbc264"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-win_amd64.whl", hash = "sha256:a7f7b5c07ae5c0cfd24c2db86071fb2a3d947da7bd487e359cc91e67ac1c6d2e"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:4afbbf5ef41ac18e02c8dc1f86c04b22b7a2125f2a030e25bbb4aff31abb224b"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24e300c0c2147484a002b175f4e1361f102e82c345bf263242f0449672a4bccf"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:393cd06c3b00b57f5421e2133e088df9cabcececcea180327e43b937b5a7caa5"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:201de072b818f8ad55c80d18d1a788729cccf9be6d9dc3b9d8613b053cd4836d"}, - {file = 
"SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653ed6817c710d0c95558232aba799307d14ae084cc9b1f4c389157ec50df5c"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-win32.whl", hash = "sha256:647e0b309cb4512b1f1b78471fdaf72921b6fa6e750b9f891e09c6e2f0e5326f"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-win_amd64.whl", hash = "sha256:ab73ed1a05ff539afc4a7f8cf371764cdf79768ecb7d2ec691e3ff89abbc541e"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37ce517c011560d68f1ffb28af65d7e06f873f191eb3a73af5671e9c3fada08a"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1878ce508edea4a879015ab5215546c444233881301e97ca16fe251e89f1c55"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ab792ca493891d7a45a077e35b418f68435efb3e1706cb8155e20e86a9013c"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e8e608983e6f85d0852ca61f97e521b62e67969e6e640fe6c6b575d4db68557"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccf956da45290df6e809ea12c54c02ace7f8ff4d765d6d3dfb3655ee876ce58d"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-win32.whl", hash = "sha256:f167c8175ab908ce48bd6550679cc6ea20ae169379e73c7720a28f89e53aa532"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-win_amd64.whl", hash = "sha256:45806315aae81a0c202752558f0df52b42d11dd7ba0097bf71e253b4215f34f4"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b6d0c4b15d65087738a6e22e0ff461b407533ff65a73b818089efc8eb2b3e1de"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a843e34abfd4c797018fd8d00ffffa99fd5184c421f190b6ca99def4087689bd"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:738d7321212941ab19ba2acf02a68b8ee64987b248ffa2101630e8fccb549e0d"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c890421651b45a681181301b3497e4d57c0d01dc001e10438a40e9a9c25ee77"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d26f280b8f0a8f497bc10573849ad6dc62e671d2468826e5c748d04ed9e670d5"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-win32.whl", hash = "sha256:ec2268de67f73b43320383947e74700e95c6770d0c68c4e615e9897e46296294"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-win_amd64.whl", hash = "sha256:bbdf16372859b8ed3f4d05f925a984771cd2abd18bd187042f24be4886c2a15f"}, - {file = "SQLAlchemy-1.4.49.tar.gz", hash = "sha256:06ff25cbae30c396c4b7737464f2a7fc37a67b7da409993b182b024cec80aed9"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7001dc9d5f6bb4deb756d5928eaefe1930f6f4179da3924cbd95ee0e9f4dce89"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a89ce07ad2d4b8cfc30bd5889ec40613e028ed80ef47da7d9dd2ce969ad30e0"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10853a53a4a00417a00913d270dddda75815fcb80675874285f41051c094d7dd"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fac0fa4e4f55f118fd87177dacb1c6522fe39c28d498d259014020fec9164c29"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3713e21ea67bca727eecd4a24bf68bcd414c403faae4989442be60994301ded0"}, + {file = 
"sqlalchemy-2.0.48-cp310-cp310-win32.whl", hash = "sha256:d404dc897ce10e565d647795861762aa2d06ca3f4a728c5e9a835096c7059018"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-win_amd64.whl", hash = "sha256:841a94c66577661c1f088ac958cd767d7c9bf507698f45afffe7a4017049de76"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b4c575df7368b3b13e0cebf01d4679f9a28ed2ae6c1cd0b1d5beffb6b2007dc"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e83e3f959aaa1c9df95c22c528096d94848a1bc819f5d0ebf7ee3df0ca63db6c"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f7b7243850edd0b8b97043f04748f31de50cf426e939def5c16bedb540698f7"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:82745b03b4043e04600a6b665cb98697c4339b24e34d74b0a2ac0a2488b6f94d"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5e088bf43f6ee6fec7dbf1ef7ff7774a616c236b5c0cb3e00662dd71a56b571"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-win32.whl", hash = "sha256:9c7d0a77e36b5f4b01ca398482230ab792061d243d715299b44a0b55c89fe617"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-win_amd64.whl", hash = "sha256:583849c743e0e3c9bb7446f5b5addeacedc168d657a69b418063dfdb2d90081c"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:348174f228b99f33ca1f773e85510e08927620caa59ffe7803b37170df30332b"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53667b5f668991e279d21f94ccfa6e45b4e3f4500e7591ae59a8012d0f010dcb"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34634e196f620c7a61d18d5cf7dc841ca6daa7961aed75d532b7e58b309ac894"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-musllinux_1_2_aarch64.whl", 
hash = "sha256:546572a1793cc35857a2ffa1fe0e58571af1779bcc1ffa7c9fb0839885ed69a9"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:07edba08061bc277bfdc772dd2a1a43978f5a45994dd3ede26391b405c15221e"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-win32.whl", hash = "sha256:908a3fa6908716f803b86896a09a2c4dde5f5ce2bb07aacc71ffebb57986ce99"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-win_amd64.whl", hash = "sha256:68549c403f79a8e25984376480959975212a670405e3913830614432b5daa07a"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e3070c03701037aa418b55d36532ecb8f8446ed0135acb71c678dbdf12f5b6e4"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2645b7d8a738763b664a12a1542c89c940daa55196e8d73e55b169cc5c99f65f"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b19151e76620a412c2ac1c6f977ab1b9fa7ad43140178345136456d5265b32ed"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b193a7e29fd9fa56e502920dca47dffe60f97c863494946bd698c6058a55658"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:36ac4ddc3d33e852da9cb00ffb08cea62ca05c39711dc67062ca2bb1fae35fd8"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-win32.whl", hash = "sha256:389b984139278f97757ea9b08993e7b9d1142912e046ab7d82b3fbaeb0209131"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-win_amd64.whl", hash = "sha256:d612c976cbc2d17edfcc4c006874b764e85e990c29ce9bd411f926bbfb02b9a2"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69f5bc24904d3bc3640961cddd2523e361257ef68585d6e364166dfbe8c78fae"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:fd08b90d211c086181caed76931ecfa2bdfc83eea3cfccdb0f82abc6c4b876cb"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1ccd42229aaac2df431562117ac7e667d702e8e44afdb6cf0e50fa3f18160f0b"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0dcbc588cd5b725162c076eb9119342f6579c7f7f55057bb7e3c6ff27e13121"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-win32.whl", hash = "sha256:9764014ef5e58aab76220c5664abb5d47d5bc858d9debf821e55cfdd0f128485"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-win_amd64.whl", hash = "sha256:e2f35b4cccd9ed286ad62e0a3c3ac21e06c02abc60e20aa51a3e305a30f5fa79"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e2d0d88686e3d35a76f3e15a34e8c12d73fc94c1dea1cd55782e695cc14086dd"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49b7bddc1eebf011ea5ab722fdbe67a401caa34a350d278cc7733c0e88fecb1f"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:426c5ca86415d9b8945c7073597e10de9644802e2ff502b8e1f11a7a2642856b"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:288937433bd44e3990e7da2402fabc44a3c6c25d3704da066b85b89a85474ae0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8183dc57ae7d9edc1346e007e840a9f3d6aa7b7f165203a99e16f447150140d2"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-win32.whl", hash = "sha256:1182437cb2d97988cfea04cf6cdc0b0bb9c74f4d56ec3d08b81e23d621a28cc6"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-win_amd64.whl", hash = "sha256:144921da96c08feb9e2b052c5c5c1d0d151a292c6135623c6b2c041f2a45f9e0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5aee45fd2c6c0f2b9cdddf48c48535e7471e42d6fb81adfde801da0bd5b93241"}, + {file = 
"sqlalchemy-2.0.48-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7cddca31edf8b0653090cbb54562ca027c421c58ddde2c0685f49ff56a1690e0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7a936f1bb23d370b7c8cc079d5fce4c7d18da87a33c6744e51a93b0f9e97e9b3"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e004aa9248e8cb0a5f9b96d003ca7c1c0a5da8decd1066e7b53f59eb8ce7c62b"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-win32.whl", hash = "sha256:b8438ec5594980d405251451c5b7ea9aa58dda38eb7ac35fb7e4c696712ee24f"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-win_amd64.whl", hash = "sha256:d854b3970067297f3a7fbd7a4683587134aa9b3877ee15aa29eea478dc68f933"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8649a14caa5f8a243628b1d61cf530ad9ae4578814ba726816adb1121fc493e"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6bb85c546591569558571aa1b06aba711b26ae62f111e15e56136d69920e1616"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6b764fb312bd35e47797ad2e63f0d323792837a6ac785a4ca967019357d2bc7"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:7c998f2ace8bf76b453b75dbcca500d4f4b9dd3908c13e89b86289b37784848b"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d64177f443594c8697369c10e4bbcac70ef558e0f7921a1de7e4a3d1734bcf67"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-win32.whl", hash = "sha256:01f6bbd4308b23240cf7d3ef117557c8fd097ec9549d5d8a52977544e35b40ad"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-win_amd64.whl", hash = "sha256:858e433f12b0e5b3ed2f8da917433b634f4937d0e8793e5cb33c54a1a01df565"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:4599a95f9430ae0de82b52ff0d27304fe898c17cb5f4099f7438a51b9998ac77"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f27f9da0a7d22b9f981108fd4b62f8b5743423388915a563e651c20d06c1f457"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d8fcccbbc0c13c13702c471da398b8cd72ba740dca5859f148ae8e0e8e0d3e7e"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a5b429eb84339f9f05e06083f119ad814e6d85e27ecbdf9c551dfdbb128eaf8a"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bcb8ebbf2e2c36cfe01a94f2438012c6a9d494cf80f129d9753bcdf33bfc35a6"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-win32.whl", hash = "sha256:e214d546c8ecb5fc22d6e6011746082abf13a9cf46eefb45769c7b31407c97b5"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-win_amd64.whl", hash = "sha256:b8fc3454b4f3bd0a368001d0e968852dad45a873f8b4babd41bc302ec851a099"}, + {file = "sqlalchemy-2.0.48-py3-none-any.whl", hash = "sha256:a66fe406437dd65cacd96a72689a3aaaecaebbcd62d81c5ac1c0fdbeac835096"}, + {file = "sqlalchemy-2.0.48.tar.gz", hash = "sha256:5ca74f37f3369b45e1f6b7b06afb182af1fd5dde009e4ffd831830d98cbe5fe7"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" [package.extras] -aiomysql = ["aiomysql ; python_version >= \"3\"", 
"greenlet (!=0.4.17) ; python_version >= \"3\""] -aiosqlite = ["aiosqlite ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "typing-extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17) ; python_version >= \"3\""] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\""] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910) ; python_version >= \"3\"", "sqlalchemy2-stubs"] -mysql = ["mysqlclient (>=1.4.0) ; python_version >= \"3\"", "mysqlclient (>=1.4.0,<2) ; python_version < \"3\""] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7) ; python_version >= \"3\"", "cx-oracle (>=7,<8) ; python_version < \"3\""] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] -postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql (<1) ; python_version < \"3\"", "pymysql ; python_version >= \"3\""] -sqlcipher = ["sqlcipher3-binary ; python_version >= \"3\""] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = 
["sqlcipher3_binary"] [[package]] name = "typing-extensions" @@ -906,5 +926,5 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [metadata] lock-version = "2.1" -python-versions = "^3.11" -content-hash = "a73183522f531f6f23e9c83b2d78f3a23725778358039b9e2d04693b5e6e55a1" +python-versions = "^3.12" +content-hash = "1eafe9bbbd2df990fb90980ddbf1db11e29fb5ae52b6455c9353cf8cda1db894" diff --git a/pyproject.toml b/pyproject.toml index 08f6de0..55eb663 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,11 +6,11 @@ authors = ["Klint Van Tassel ", "Evan Petzoldt tuple[str, dict]: - """Build MySQL URL and connect_args from legacy env vars.""" + """Build MySQL URL and connect_args from DATABASE_* env vars.""" host = os.environ[constants.DATABASE_HOST] user = quote_plus(os.environ[constants.DATABASE_USER]) passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) diff --git a/syncbot/db/schemas.py b/syncbot/db/schemas.py index a044a9b..2f2f29c 100644 --- a/syncbot/db/schemas.py +++ b/syncbot/db/schemas.py @@ -19,13 +19,11 @@ from typing import Any -import sqlalchemy from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship +from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.types import DECIMAL -BaseClass = declarative_base(mapper=sqlalchemy.orm.mapper) +BaseClass = declarative_base() class GetDBClass: diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index 5d3dded..d3631ee 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,14 +1,14 @@ -certifi==2026.1.4 ; python_version >= "3.11" and python_version < "4.0" -cffi==2.0.0 ; python_version >= "3.11" and python_version < "4.0" -charset-normalizer==3.4.4 ; python_version >= "3.11" and python_version < "4.0" -cryptography==46.0.5 ; python_version >= "3.11" and python_version < "4.0" -greenlet==3.1.1 ; python_version >= "3.11" and (platform_machine == 
"aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" -idna==3.11 ; python_version >= "3.11" and python_version < "4.0" -pycparser==2.23 ; python_version >= "3.11" and python_version < "4.0" -pymysql==1.1.2 ; python_version >= "3.11" and python_version < "4.0" -python-dotenv==1.2.1 ; python_version >= "3.11" and python_version < "4.0" -requests==2.32.5 ; python_version >= "3.11" and python_version < "4.0" -slack-bolt==1.27.0 ; python_version >= "3.11" and python_version < "4.0" -slack-sdk==3.40.0 ; python_version >= "3.11" and python_version < "4.0" -sqlalchemy==1.4.54 ; python_version >= "3.11" and python_version < "4.0" -urllib3==2.6.3 ; python_version >= "3.11" and python_version < "4.0" +certifi==2026.1.4 ; python_version >= "3.12" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" +charset-normalizer==3.4.4 ; python_version >= "3.12" and python_version < "4.0" +cryptography==46.0.5 ; python_version >= "3.12" and python_version < "4.0" +greenlet==3.1.1 ; python_version >= "3.12" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" +idna==3.11 ; python_version >= "3.12" and python_version < "4.0" +pycparser==2.23 ; python_version >= "3.12" and python_version < "4.0" +pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" +python-dotenv==1.2.1 ; python_version >= "3.12" and python_version < "4.0" +requests==2.32.5 ; python_version >= "3.12" and python_version < "4.0" +slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0" +slack-sdk==3.40.0 ; python_version >= "3.12" and python_version < "4.0" +sqlalchemy==2.0.38 ; 
python_version >= "3.12" and python_version < "4.0" +urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0" diff --git a/tests/test_db.py b/tests/test_db.py index b96a25a..db95a99 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -180,6 +180,8 @@ def test_get_required_db_vars_mysql_without_url(self): required = get_required_db_vars() assert "DATABASE_HOST" in required assert "DATABASE_USER" in required + assert "DATABASE_PASSWORD" in required + assert "DATABASE_SCHEMA" in required def test_get_required_db_vars_sqlite(self): with patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False): From e43013b9b1aee43de1e276cc7d34a1a73572009c Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 18 Mar 2026 09:15:30 -0500 Subject: [PATCH 17/45] Improvements to SAM config. --- docs/DEPLOYMENT.md | 22 ++++++++++++++ samconfig.toml | 71 +++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 86 insertions(+), 7 deletions(-) diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 0745908..c4091ae 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -181,6 +181,28 @@ If using GitHub Actions, set optional secret `TOKEN_ENCRYPTION_KEY_OVERRIDE`; th Or use a dedicated IAM user with the same policy. See [Deployment Guide (legacy detail)](#sharing-infrastructure-across-apps-aws) for shared RDS and parameter overrides. 
+#### Optional: `samconfig` deploy profiles + +This repo includes pre-defined SAM config environments in `samconfig.toml` to reduce guided prompts: + +- `test-new-rds` — test stack, creates new RDS +- `test-existing-rds` — test stack, uses existing RDS host/admin credentials +- `prod-new-rds` — prod stack, creates new RDS +- `prod-existing-rds` — prod stack, uses existing RDS host/admin credentials + +Examples: + +```bash +sam build --config-env test-new-rds +sam deploy --config-env test-new-rds + +sam deploy --config-env test-existing-rds +sam deploy --config-env prod-existing-rds +``` + +For the `*-existing-rds` profiles, replace `REPLACE_ME_*` placeholders in `samconfig.toml` before deploy. +For disaster recovery (preserve token decryption), add `TokenEncryptionKeyOverride=` to that profile's `parameter_overrides`. + --- ## GCP diff --git a/samconfig.toml b/samconfig.toml index dff16b5..4a571de 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -1,8 +1,22 @@ -# SAM CLI configuration for local deployment -# Usage: -# sam build --use-container -# sam deploy # uses [default.deploy.parameters] -# sam deploy --config-env prod # uses [prod.deploy.parameters] +# SAM CLI configuration for local deployment. +# +# Usage examples: +# sam build --config-env test-new-rds +# sam deploy --config-env test-new-rds +# +# sam build --config-env test-existing-rds +# sam deploy --config-env test-existing-rds +# +# sam deploy --config-env prod-new-rds +# sam deploy --config-env prod-existing-rds +# +# NOTE: +# - Fill in placeholders (DB host/admin user) before using *-existing-rds configs. +# - Secrets like SlackSigningSecret, SlackClientSecret, and DatabasePassword are +# expected to be provided securely during deploy workflows. +# - For disaster recovery with existing encrypted tokens, add: +# TokenEncryptionKeyOverride= +# to the relevant environment's parameter_overrides. 
version = 0.1 @@ -19,11 +33,54 @@ capabilities = "CAPABILITY_IAM" confirm_changeset = true parameter_overrides = "Stage=test" -[prod.deploy.parameters] +[test-new-rds.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[test-new-rds.deploy.parameters] +stack_name = "syncbot-test" +resolve_s3 = true +s3_prefix = "syncbot-test" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=test ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" + +[test-existing-rds.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[test-existing-rds.deploy.parameters] +stack_name = "syncbot-test" +resolve_s3 = true +s3_prefix = "syncbot-test" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=test ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" + +[prod-new-rds.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[prod-new-rds.deploy.parameters] +stack_name = "syncbot-prod" +resolve_s3 = true +s3_prefix = "syncbot-prod" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=prod ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" + +[prod-existing-rds.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[prod-existing-rds.deploy.parameters] stack_name = "syncbot-prod" resolve_s3 = true s3_prefix = "syncbot-prod" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=prod" +parameter_overrides = "Stage=prod ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" From 
516bca944fade1a142b9be7e6a652feb0e7bd6a2 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Fri, 20 Mar 2026 16:18:37 -0500 Subject: [PATCH 18/45] Added deploy script. Added user scopes to Slack app. --- .env.example | 2 +- docs/DEPLOYMENT.md | 42 +++- infra/aws/scripts/deploy.sh | 411 ++++++++++++++++++++++++++++++++++++ infra/aws/template.yaml | 72 ++++++- samconfig.toml | 12 ++ slack-manifest.yaml | 18 ++ 6 files changed, 546 insertions(+), 11 deletions(-) create mode 100755 infra/aws/scripts/deploy.sh diff --git a/.env.example b/.env.example index 2a1498a..686979a 100644 --- a/.env.example +++ b/.env.example @@ -43,7 +43,7 @@ DATABASE_SCHEMA=syncbot # SLACK_SIGNING_SECRET=your-signing-secret # ENV_SLACK_CLIENT_ID=your-client-id # ENV_SLACK_CLIENT_SECRET=your-client-secret -# ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write:user,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email # OAuth state and installation data are stored in the same database (MySQL or SQLite). 
# ----------------------------------------------------------------------------- diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index c4091ae..fc56e2f 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -79,6 +79,26 @@ You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketNa --- +### Fast path: interactive AWS deploy script + +For local, end-to-end deploys (bootstrap + build + deploy), use: + +```bash +./infra/aws/scripts/deploy.sh +``` + +The script: +- prompts for stage (`test`/`prod`) and DB mode (new RDS vs existing host), +- prompts for required secrets/credentials, +- auto-detects bootstrap outputs (region, deploy bucket, suggested stack names) when available, +- supports existing-RDS `public` or `private` network mode (with VPC subnet/security-group prompts for private mode), +- supports disaster recovery with `TokenEncryptionKeyOverride`, +- runs `sam build` and `sam deploy` for you. + +If bootstrap is missing, it can deploy bootstrap first. + +--- + ### Fork and Deploy (AWS, GitHub CI) 1. Complete [One-Time Bootstrap (AWS)](#one-time-bootstrap-aws-both-paths). @@ -122,21 +142,22 @@ To **reuse only the DB host** and have the deploy create the schema and a dedica - **Schema name:** A dedicated schema per app or environment (e.g. `syncbot_test`, `syncbot_prod`). The deploy creates this schema and the app user with full access to it; the app runs Alembic migrations on startup. 3. **Connectivity:** - When using an existing host, Lambda is **not** put in a VPC. It can only reach **publicly accessible** endpoints. Your RDS must be: - - Set to **publicly accessible** (in RDS settings), and - - Protected by a security group that allows **inbound TCP 3306** from the internet (or restrict to known IPs). - For production, consider a VPC-enabled Lambda and private RDS; that would require template changes. 
+ Existing-host deploys support two modes: + - **public** (default): Lambda is not VPC-attached; existing RDS must be publicly reachable on 3306. + - **private**: Lambda is VPC-attached using `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`. + + For private mode, ensure: + - the Lambda security group can reach the DB on TCP 3306, and + - the app Lambda has outbound internet egress (typically NAT) so Slack API calls succeed. 4. **First deploy (local `sam deploy`):** - Pass the **existing-host** parameters (admin user/password). When using **guided** mode, SAM will still prompt for **DatabaseUser** and **DatabasePassword**; the stack ignores these when using an existing host (app user/password are auto-generated). If the **DatabasePassword** prompt repeats in a loop (SAM often rejects empty for password fields), type any placeholder (e.g. `ignored`) and continue — it is never used. To avoid interactive prompts, use **parameter-overrides** and set `DatabaseUser=` and `DatabasePassword=ignored` (or any value) for existing-host deploys: + Pass the **existing-host** parameters (admin user/password). When using **guided** mode, SAM may still prompt for **DatabaseUser** and **DatabasePassword**; the stack ignores these when using an existing host (app user/password are auto-generated). If a prompt repeats, provide any placeholder and continue. For non-guided deploys, pass only the existing-host parameters you actually use: ```bash sam deploy --guided ... \ --parameter-overrides \ ExistingDatabaseHost=your-db.xxxx.us-east-2.rds.amazonaws.com \ ExistingDatabaseAdminUser=admin \ ExistingDatabaseAdminPassword=your_master_password \ - DatabaseUser= \ - DatabasePassword=ignored \ DatabaseSchema=syncbot_test \ SlackClientID=your_slack_app_client_id \ SlackSigningSecret=... \ @@ -161,6 +182,12 @@ sam deploy ... --parameter-overrides "... 
TokenEncryptionKeyOverride=" If using GitHub Actions, set optional secret `TOKEN_ENCRYPTION_KEY_OVERRIDE`; the AWS workflow will pass it automatically. +If a previous failed deploy already created `syncbot--token-encryption-key`, you can also reuse that secret directly (instead of creating a new one) by passing: + +```bash +sam deploy ... --parameter-overrides "... ExistingTokenEncryptionKeySecretArn=" +``` + --- ### Download and Deploy (AWS, local) @@ -275,6 +302,7 @@ No changes are needed under `syncbot/` or to the deployment contract; only `infr | Provider | Script | Use | |----------|--------|-----| | AWS | `./infra/aws/scripts/print-bootstrap-outputs.sh` | Print bootstrap stack outputs and suggested GitHub variables (run from repo root). | +| AWS | `./infra/aws/scripts/deploy.sh` | Interactive local deploy helper (optional bootstrap, build, deploy, existing/new RDS prompts). | | GCP | `./infra/gcp/scripts/print-bootstrap-outputs.sh` | Print Terraform outputs and suggested GitHub variables (run from repo root). | --- diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh new file mode 100755 index 0000000..a1dcfa5 --- /dev/null +++ b/infra/aws/scripts/deploy.sh @@ -0,0 +1,411 @@ +#!/usr/bin/env bash +# Interactive AWS deploy helper for SyncBot. +# Handles: bootstrap (optional), sam build, sam deploy (new RDS or existing RDS). +# +# Run from repo root: +# ./infra/aws/scripts/deploy.sh + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" + +BOOTSTRAP_TEMPLATE="$REPO_ROOT/infra/aws/template.bootstrap.yaml" +APP_TEMPLATE="$REPO_ROOT/infra/aws/template.yaml" + +require_cmd() { + if ! command -v "$1" >/dev/null 2>&1; then + echo "Error: required command '$1' not found in PATH." 
>&2 + exit 1 + fi +} + +prompt_default() { + local prompt="$1" + local default="$2" + local value + read -r -p "$prompt [$default]: " value + if [[ -z "$value" ]]; then + value="$default" + fi + echo "$value" +} + +prompt_secret() { + local prompt="$1" + local value + read -r -s -p "$prompt: " value + # Keep the visual newline on the terminal even when called via $(...). + printf '\n' >&2 + echo "$value" +} + +prompt_yes_no() { + local prompt="$1" + local default="${2:-y}" + local answer + local shown="y/N" + [[ "$default" == "y" ]] && shown="Y/n" + read -r -p "$prompt [$shown]: " answer + if [[ -z "$answer" ]]; then + answer="$default" + fi + [[ "$answer" =~ ^[Yy]$ ]] +} + +bootstrap_describe_outputs() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$region" 2>/dev/null || true +} + +output_value() { + local outputs="$1" + local key="$2" + echo "$outputs" | awk -F'\t' -v k="$key" '$1==k {print $2}' +} + +secret_arn_by_name() { + local secret_name="$1" + local region="$2" + aws secretsmanager describe-secret \ + --secret-id "$secret_name" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>/dev/null || true +} + +rds_lookup_network_defaults() { + local db_host="$1" + local region="$2" + aws rds describe-db-instances \ + --region "$region" \ + --query "DBInstances[?Endpoint.Address=='$db_host']|[0].[PubliclyAccessible,join(',',DBSubnetGroup.Subnets[].SubnetIdentifier),join(',',VpcSecurityGroups[].VpcSecurityGroupId),DBSubnetGroup.VpcId,DBInstanceIdentifier]" \ + --output text 2>/dev/null || true +} + +stack_status() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --region "$region" \ + --query 'Stacks[0].StackStatus' \ + --output text 2>/dev/null || true +} + +print_recent_stack_failures() { + local stack_name="$1" + local region="$2" 
+ echo "Recent failure events for $stack_name:" + aws cloudformation describe-stack-events \ + --stack-name "$stack_name" \ + --region "$region" \ + --query "StackEvents[?contains(ResourceStatus, 'FAILED')].[Timestamp,LogicalResourceId,ResourceStatus,ResourceStatusReason]" \ + --output table 2>/dev/null || true +} + +handle_unhealthy_stack_state() { + local stack_name="$1" + local region="$2" + local status + status="$(stack_status "$stack_name" "$region")" + if [[ -z "$status" || "$status" == "None" ]]; then + return 0 + fi + + case "$status" in + CREATE_FAILED|ROLLBACK_COMPLETE|ROLLBACK_FAILED|UPDATE_ROLLBACK_FAILED|DELETE_FAILED) + echo + echo "Stack $stack_name is in a failed state: $status" + print_recent_stack_failures "$stack_name" "$region" + echo + if prompt_yes_no "Delete failed stack '$stack_name' now so deploy can continue?" "y"; then + aws cloudformation delete-stack --stack-name "$stack_name" --region "$region" + echo "Waiting for stack deletion to complete..." + aws cloudformation wait stack-delete-complete --stack-name "$stack_name" --region "$region" + else + echo "Cannot continue deploy while stack is in $status." + exit 1 + fi + ;; + *_IN_PROGRESS) + echo "Error: stack $stack_name is currently $status. Wait for it to finish, then rerun." >&2 + exit 1 + ;; + *) + ;; + esac +} + +require_cmd aws +require_cmd sam + +if [[ ! -f "$APP_TEMPLATE" ]]; then + echo "Error: app template not found at $APP_TEMPLATE" >&2 + exit 1 +fi +if [[ ! 
-f "$BOOTSTRAP_TEMPLATE" ]]; then + echo "Error: bootstrap template not found at $BOOTSTRAP_TEMPLATE" >&2 + exit 1 +fi + +echo "=== SyncBot AWS Deploy Helper ===" +echo + +DEFAULT_REGION="${AWS_REGION:-us-east-2}" +REGION="$(prompt_default "AWS region" "$DEFAULT_REGION")" +BOOTSTRAP_STACK="$(prompt_default "Bootstrap stack name" "syncbot-bootstrap")" + +BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" +if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then + echo + echo "Bootstrap stack not found (or has no outputs): $BOOTSTRAP_STACK in $REGION" + if prompt_yes_no "Deploy bootstrap stack now?" "y"; then + GITHUB_REPO="$(prompt_default "GitHub repository (owner/repo)" "REPLACE_ME_OWNER/REPLACE_ME_REPO")" + CREATE_OIDC="$(prompt_default "Create OIDC provider (true/false)" "true")" + BUCKET_PREFIX="$(prompt_default "Deployment bucket prefix" "syncbot-deploy")" + echo + echo "Deploying bootstrap stack..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$BOOTSTRAP_STACK" \ + --parameter-overrides \ + "GitHubRepository=$GITHUB_REPO" \ + "CreateOIDCProvider=$CREATE_OIDC" \ + "DeploymentBucketPrefix=$BUCKET_PREFIX" \ + --capabilities CAPABILITY_NAMED_IAM \ + --region "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + else + echo "Skipping bootstrap. You must provide deploy bucket manually." 
+ fi +fi + +S3_BUCKET="$(output_value "$BOOTSTRAP_OUTPUTS" "DeploymentBucketName")" +if [[ -n "$S3_BUCKET" ]]; then + echo "Detected deploy bucket from bootstrap: $S3_BUCKET" +else + S3_BUCKET="$(prompt_default "Deployment S3 bucket name" "REPLACE_ME_DEPLOY_BUCKET")" +fi + +SUGGESTED_TEST_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedTestStackName")" +SUGGESTED_PROD_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedProdStackName")" +[[ -z "$SUGGESTED_TEST_STACK" ]] && SUGGESTED_TEST_STACK="syncbot-test" +[[ -z "$SUGGESTED_PROD_STACK" ]] && SUGGESTED_PROD_STACK="syncbot-prod" + +echo +STAGE="$(prompt_default "Deploy stage (test/prod)" "test")" +if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then + echo "Error: stage must be 'test' or 'prod'." >&2 + exit 1 +fi + +DEFAULT_STACK="$SUGGESTED_TEST_STACK" +[[ "$STAGE" == "prod" ]] && DEFAULT_STACK="$SUGGESTED_PROD_STACK" +STACK_NAME="$(prompt_default "App stack name" "$DEFAULT_STACK")" + +echo +echo "Database mode:" +echo " 1) Create new RDS in stack" +echo " 2) Use existing RDS host (deploy creates schema/app user)" +DB_MODE="$(prompt_default "Choose 1 or 2" "1")" +if [[ "$DB_MODE" != "1" && "$DB_MODE" != "2" ]]; then + echo "Error: invalid database mode." 
>&2 + exit 1 +fi + +echo +SLACK_SIGNING_SECRET="$(prompt_secret "SlackSigningSecret")" +SLACK_CLIENT_SECRET="$(prompt_secret "SlackClientSecret")" +SLACK_CLIENT_ID="$(prompt_default "SlackClientID (optional; blank uses template stage default)" "")" + +EXISTING_DATABASE_HOST="" +EXISTING_DATABASE_ADMIN_USER="" +EXISTING_DATABASE_ADMIN_PASSWORD="" +EXISTING_DATABASE_NETWORK_MODE="public" +EXISTING_DATABASE_SUBNET_IDS_CSV="" +EXISTING_DATABASE_LAMBDA_SG_ID="" +DATABASE_USER="" +DATABASE_PASSWORD="" +DATABASE_SCHEMA="" + +if [[ "$DB_MODE" == "2" ]]; then + EXISTING_DATABASE_HOST="$(prompt_default "ExistingDatabaseHost (RDS endpoint hostname)" "REPLACE_ME_RDS_HOST")" + EXISTING_DATABASE_ADMIN_USER="$(prompt_default "ExistingDatabaseAdminUser" "admin")" + EXISTING_DATABASE_ADMIN_PASSWORD="$(prompt_secret "ExistingDatabaseAdminPassword")" + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "syncbot_${STAGE}")" + + RDS_LOOKUP="$(rds_lookup_network_defaults "$EXISTING_DATABASE_HOST" "$REGION")" + DETECTED_PUBLIC="" + DETECTED_SUBNETS="" + DETECTED_SGS="" + DETECTED_VPC="" + DETECTED_DB_ID="" + if [[ -n "$RDS_LOOKUP" && "$RDS_LOOKUP" != "None" ]]; then + IFS=$'\t' read -r DETECTED_PUBLIC DETECTED_SUBNETS DETECTED_SGS DETECTED_VPC DETECTED_DB_ID <<< "$RDS_LOOKUP" + [[ "$DETECTED_PUBLIC" == "None" ]] && DETECTED_PUBLIC="" + [[ "$DETECTED_SUBNETS" == "None" ]] && DETECTED_SUBNETS="" + [[ "$DETECTED_SGS" == "None" ]] && DETECTED_SGS="" + [[ "$DETECTED_VPC" == "None" ]] && DETECTED_VPC="" + [[ "$DETECTED_DB_ID" == "None" ]] && DETECTED_DB_ID="" + echo + echo "Detected RDS instance details:" + [[ -n "$DETECTED_DB_ID" ]] && echo " DB instance: $DETECTED_DB_ID" + [[ -n "$DETECTED_VPC" ]] && echo " VPC: $DETECTED_VPC" + [[ -n "$DETECTED_PUBLIC" ]] && echo " Public access: $DETECTED_PUBLIC" + else + echo + echo "Could not auto-detect existing RDS network settings from host." + echo "You can still continue by entering network values manually." 
+ fi + + DEFAULT_EXISTING_DB_NETWORK_MODE="public" + if [[ "$DETECTED_PUBLIC" == "False" ]]; then + DEFAULT_EXISTING_DB_NETWORK_MODE="private" + fi + EXISTING_DATABASE_NETWORK_MODE="$(prompt_default "Existing DB network mode (public/private)" "$DEFAULT_EXISTING_DB_NETWORK_MODE")" + if [[ "$EXISTING_DATABASE_NETWORK_MODE" != "public" && "$EXISTING_DATABASE_NETWORK_MODE" != "private" ]]; then + echo "Error: existing DB network mode must be 'public' or 'private'." >&2 + exit 1 + fi + + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + DEFAULT_SUBNETS="$DETECTED_SUBNETS" + [[ -z "$DEFAULT_SUBNETS" ]] && DEFAULT_SUBNETS="REPLACE_ME_SUBNET_1,REPLACE_ME_SUBNET_2" + DEFAULT_SG="${DETECTED_SGS%%,*}" + [[ -z "$DEFAULT_SG" ]] && DEFAULT_SG="REPLACE_ME_LAMBDA_SG_ID" + + echo + echo "Private DB mode selected: Lambdas will run in VPC." + echo "Note: app Lambda needs internet egress (usually NAT) to call Slack APIs." + EXISTING_DATABASE_SUBNET_IDS_CSV="$(prompt_default "ExistingDatabaseSubnetIdsCsv (comma-separated)" "$DEFAULT_SUBNETS")" + EXISTING_DATABASE_LAMBDA_SG_ID="$(prompt_default "ExistingDatabaseLambdaSecurityGroupId" "$DEFAULT_SG")" + + if [[ -z "$EXISTING_DATABASE_SUBNET_IDS_CSV" || "$EXISTING_DATABASE_SUBNET_IDS_CSV" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseSubnetIdsCsv is required for private mode." >&2 + exit 1 + fi + if [[ -z "$EXISTING_DATABASE_LAMBDA_SG_ID" || "$EXISTING_DATABASE_LAMBDA_SG_ID" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseLambdaSecurityGroupId is required for private mode." 
>&2 + exit 1 + fi + fi +else + DATABASE_USER="$(prompt_default "DatabaseUser (new RDS master username)" "syncbot_admin")" + DATABASE_PASSWORD="$(prompt_secret "DatabasePassword (new RDS master password)")" + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "syncbot_${STAGE}")" +fi + +TOKEN_OVERRIDE="$(prompt_default "TokenEncryptionKeyOverride (optional DR key; leave blank for normal deploy)" "")" +EXISTING_TOKEN_SECRET_ARN="" +TOKEN_SECRET_NAME="syncbot-${STAGE}-token-encryption-key" +if [[ -z "$TOKEN_OVERRIDE" ]]; then + DETECTED_TOKEN_SECRET_ARN="$(secret_arn_by_name "$TOKEN_SECRET_NAME" "$REGION")" + if [[ -n "$DETECTED_TOKEN_SECRET_ARN" && "$DETECTED_TOKEN_SECRET_ARN" != "None" ]]; then + echo "Detected existing token secret: $TOKEN_SECRET_NAME" + if prompt_yes_no "Reuse this existing token secret ARN to avoid name-collision failures?" "y"; then + EXISTING_TOKEN_SECRET_ARN="$DETECTED_TOKEN_SECRET_ARN" + fi + fi +fi + +echo +echo "=== Deploy Summary ===" +echo "Region: $REGION" +echo "Stack: $STACK_NAME" +echo "Stage: $STAGE" +echo "Deploy bucket: $S3_BUCKET" +if [[ "$DB_MODE" == "2" ]]; then + echo "DB mode: existing host" + echo "DB host: $EXISTING_DATABASE_HOST" + echo "DB network: $EXISTING_DATABASE_NETWORK_MODE" + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + echo "DB subnets: $EXISTING_DATABASE_SUBNET_IDS_CSV" + echo "Lambda SG: $EXISTING_DATABASE_LAMBDA_SG_ID" + fi + echo "DB schema: $DATABASE_SCHEMA" +else + echo "DB mode: create new RDS" + echo "DB user: $DATABASE_USER" + echo "DB schema: $DATABASE_SCHEMA" +fi +if [[ -n "$TOKEN_OVERRIDE" ]]; then + echo "DR key override: YES (TokenEncryptionKeyOverride)" +else + echo "DR key override: NO (auto-generated TOKEN_ENCRYPTION_KEY)" + if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then + echo "Token secret: Reusing existing secret ARN" + fi +fi +echo + +if ! prompt_yes_no "Proceed with build + deploy?" "y"; then + echo "Aborted." 
+ exit 0 +fi + +handle_unhealthy_stack_state "$STACK_NAME" "$REGION" + +echo +echo "Building app..." +sam build -t "$APP_TEMPLATE" --use-container + +PARAMS=( + "Stage=$STAGE" + "SlackSigningSecret=$SLACK_SIGNING_SECRET" + "SlackClientSecret=$SLACK_CLIENT_SECRET" + "DatabaseSchema=$DATABASE_SCHEMA" +) + +if [[ -n "$SLACK_CLIENT_ID" ]]; then + PARAMS+=("SlackClientID=$SLACK_CLIENT_ID") +fi + +if [[ "$DB_MODE" == "2" ]]; then + PARAMS+=( + "ExistingDatabaseHost=$EXISTING_DATABASE_HOST" + "ExistingDatabaseAdminUser=$EXISTING_DATABASE_ADMIN_USER" + "ExistingDatabaseAdminPassword=$EXISTING_DATABASE_ADMIN_PASSWORD" + "ExistingDatabaseNetworkMode=$EXISTING_DATABASE_NETWORK_MODE" + ) + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + PARAMS+=( + "ExistingDatabaseSubnetIdsCsv=$EXISTING_DATABASE_SUBNET_IDS_CSV" + "ExistingDatabaseLambdaSecurityGroupId=$EXISTING_DATABASE_LAMBDA_SG_ID" + ) + fi +else + PARAMS+=( + "DatabaseUser=$DATABASE_USER" + "DatabasePassword=$DATABASE_PASSWORD" + ) +fi + +if [[ -n "$TOKEN_OVERRIDE" ]]; then + PARAMS+=("TokenEncryptionKeyOverride=$TOKEN_OVERRIDE") +fi +if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then + PARAMS+=("ExistingTokenEncryptionKeySecretArn=$EXISTING_TOKEN_SECRET_ARN") +fi + +echo "Deploying stack..." +sam deploy \ + -t .aws-sam/build/template.yaml \ + --stack-name "$STACK_NAME" \ + --s3-bucket "$S3_BUCKET" \ + --capabilities CAPABILITY_IAM \ + --region "$REGION" \ + --no-fail-on-empty-changeset \ + --parameter-overrides "${PARAMS[@]}" + +echo +echo "Deploy complete." +echo "IMPORTANT: back up TOKEN_ENCRYPTION_KEY from Secrets Manager." 
+echo "Expected secret name: syncbot-${STAGE}-token-encryption-key" +echo "Example read command:" +echo " aws secretsmanager get-secret-value --secret-id syncbot-${STAGE}-token-encryption-key --query SecretString --output text --region $REGION" diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 95b58ef..b6c343a 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -57,7 +57,7 @@ Parameters: SlackOauthScopes: Description: Comma-separated list of Slack OAuth scopes Type: String - Default: "app_mentions:read,channels:history,channels:join,chat:write,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write" + Default: "app_mentions:read,channels:history,channels:join,chat:write,chat:write:user,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write" # --- Database (RDS) --- @@ -82,6 +82,33 @@ Parameters: NoEcho: true Default: "" + ExistingDatabaseNetworkMode: + Description: > + Network mode for existing database host. Use "public" when the existing RDS + endpoint is reachable from the public internet. Use "private" when it is only + reachable from within a VPC. + Type: String + Default: public + AllowedValues: + - public + - private + + ExistingDatabaseSubnetIdsCsv: + Description: > + Comma-separated subnet IDs for Lambda VPC attachment when using an existing + private database host (for example "subnet-aaa,subnet-bbb"). + Ignored unless ExistingDatabaseHost is set and ExistingDatabaseNetworkMode=private. + Type: String + Default: "" + + ExistingDatabaseLambdaSecurityGroupId: + Description: > + Security group ID for Lambda VPC attachment when using an existing private + database host. This security group must be allowed to connect to the DB on port 3306. + Ignored unless ExistingDatabaseHost is set and ExistingDatabaseNetworkMode=private. 
+ Type: String + Default: "" + DatabaseUser: Description: > Database username for new RDS only. When using an existing database @@ -136,6 +163,14 @@ Parameters: NoEcho: true Default: "" + ExistingTokenEncryptionKeySecretArn: + Description: > + Optional existing Secrets Manager secret ARN containing TOKEN_ENCRYPTION_KEY. + Use this when the secret already exists (for example after a failed create/delete cycle) + to avoid secret name collisions. + Type: String + Default: "" + RequireAdmin: Description: > When "true" (default), only workspace admins and owners can @@ -153,7 +188,14 @@ Parameters: Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] UseExistingDatabase: !Not [!Equals [!Ref ExistingDatabaseHost, ""]] + UseExistingDatabasePrivateVpc: !And + - !Condition UseExistingDatabase + - !Equals [!Ref ExistingDatabaseNetworkMode, "private"] HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] + HasExistingTokenEncryptionKeySecretArn: !Not [!Equals [!Ref ExistingTokenEncryptionKeySecretArn, ""]] + CreateTokenEncryptionKeySecret: !And + - !Not [!Condition HasTokenEncryptionKeyOverride] + - !Not [!Condition HasExistingTokenEncryptionKeySecretArn] HasSlackClientID: !Not [!Equals [!Ref SlackClientID, ""]] Mappings: @@ -328,6 +370,7 @@ Resources: TokenEncryptionKeySecret: Type: AWS::SecretsManager::Secret + Condition: CreateTokenEncryptionKeySecret DeletionPolicy: Retain UpdateReplacePolicy: Retain Properties: @@ -364,11 +407,18 @@ Resources: Timeout: 60 MemorySize: 256 Policies: + - AWSLambdaVPCAccessExecutionRole - Version: "2012-10-17" Statement: - Effect: Allow Action: secretsmanager:GetSecretValue Resource: !Ref AppDbCredentialsSecret + VpcConfig: !If + - UseExistingDatabasePrivateVpc + - SubnetIds: !Split [",", !Ref ExistingDatabaseSubnetIdsCsv] + SecurityGroupIds: + - !Ref ExistingDatabaseLambdaSecurityGroupId + - !Ref AWS::NoValue AppDbSetup: Type: Custom::ExistingRDSSetup @@ -397,6 +447,14 @@ Resources: - x86_64 
Timeout: 30 MemorySize: 128 + Policies: + - AWSLambdaVPCAccessExecutionRole + VpcConfig: !If + - UseExistingDatabasePrivateVpc + - SubnetIds: !Split [",", !Ref ExistingDatabaseSubnetIdsCsv] + SecurityGroupIds: + - !Ref ExistingDatabaseLambdaSecurityGroupId + - !Ref AWS::NoValue Events: SyncBot: Type: Api @@ -452,7 +510,12 @@ Resources: TOKEN_ENCRYPTION_KEY: !If - HasTokenEncryptionKeyOverride - !Ref TokenEncryptionKeyOverride - - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" + - !If + - HasExistingTokenEncryptionKeySecretArn + - !Sub + - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" + - { SecretArn: !Ref ExistingTokenEncryptionKeySecretArn } + - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" REQUIRE_ADMIN: !Ref RequireAdmin # ============================================================ @@ -574,4 +637,7 @@ Outputs: TokenEncryptionSecretArn: Description: Secrets Manager ARN containing TOKEN_ENCRYPTION_KEY - Value: !Ref TokenEncryptionKeySecret + Value: !If + - HasExistingTokenEncryptionKeySecretArn + - !Ref ExistingTokenEncryptionKeySecretArn + - !Ref TokenEncryptionKeySecret diff --git a/samconfig.toml b/samconfig.toml index 4a571de..0990350 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -84,3 +84,15 @@ region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true parameter_overrides = "Stage=prod ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" + +[test.deploy.parameters] +stack_name = "syncbot-test" +resolve_s3 = true +s3_prefix = "syncbot-test" +confirm_changeset = true +capabilities = "CAPABILITY_IAM" +parameter_overrides = "Stage=\"test\" SlackClientID=\"10361912548384.10524713300870\" 
SlackOauthScopes=\"app_mentions:read,channels:history,channels:join,chat:write,chat:write:user,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write\" ExistingDatabaseHost=\"f3ttown1.c7im4saakwcy.us-east-2.rds.amazonaws.com\" ExistingDatabaseAdminUser=\"f3ttown1dba\" DatabaseUser=\"syncbot_test_user\" DatabaseSchema=\"syncbot_test\" DatabaseInstanceClass=\"db.t3.micro\" AllowedDBCidr=\"0.0.0.0/0\" VpcCidr=\"10.0.0.0/16\" RequireAdmin=\"true\"" +image_repositories = [] + +[test.global.parameters] +region = "us-east-2" diff --git a/slack-manifest.yaml b/slack-manifest.yaml index 69dd12c..7dc6694 100644 --- a/slack-manifest.yaml +++ b/slack-manifest.yaml @@ -39,6 +39,24 @@ oauth_config: - team:read - users:read - users:read.email + user: + - app_mentions:read + - channels:history + - channels:join + - channels:read + - channels:manage + - files:read + - files:write + - groups:history + - groups:read + - groups:write + - im:write + - chat:write:user + - reactions:read + - reactions:write + - team:read + - users:read + - users:read.email settings: event_subscriptions: From 43496534a9300c363480f25e7d3c96efb6c8bf0c Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Sat, 21 Mar 2026 11:57:05 -0500 Subject: [PATCH 19/45] Updated database options to default to Aurora DSQL. 
--- .env.example | 25 +++-- README.md | 4 +- docs/ARCHITECTURE.md | 4 +- docs/DEPLOYMENT.md | 10 +- docs/IMPROVEMENTS.md | 7 ++ docs/INFRA_CONTRACT.md | 27 ++--- infra/aws/db_setup/handler.py | 147 ++++++++++++++++++++++------ infra/aws/db_setup/requirements.txt | 1 + infra/aws/scripts/deploy.sh | 12 ++- infra/aws/template.yaml | 121 ++++++++++++++++++----- poetry.lock | 79 ++++++++++++++- pyproject.toml | 3 +- samconfig.toml | 8 +- syncbot/constants.py | 18 ++-- syncbot/db/__init__.py | 132 ++++++++++++++++++++----- syncbot/requirements.txt | 1 + tests/conftest.py | 6 ++ tests/test_db.py | 63 ++++++++++++ tests/test_db_setup.py | 75 ++++++++++++++ 19 files changed, 625 insertions(+), 118 deletions(-) create mode 100644 tests/conftest.py create mode 100644 tests/test_db_setup.py diff --git a/.env.example b/.env.example index 686979a..6861d36 100644 --- a/.env.example +++ b/.env.example @@ -8,19 +8,28 @@ # For native Python development, source it: source .env or export $(cat .env | xargs) # ----------------------------------------------------------------------------- -# Database (backend: mysql or sqlite) — pre-release: fresh installs only +# Database (postgresql, mysql, or sqlite) — pre-release: fresh installs only # ----------------------------------------------------------------------------- -# Option A — MySQL (default): legacy vars or DATABASE_URL -DATABASE_BACKEND=mysql +# Option A — PostgreSQL / Aurora DSQL (default): legacy vars or DATABASE_URL +DATABASE_BACKEND=postgresql DATABASE_HOST=127.0.0.1 -DATABASE_USER=root -DATABASE_PASSWORD=rootpass +# DATABASE_PORT=5432 +DATABASE_USER=postgres +DATABASE_PASSWORD=postgres DATABASE_SCHEMA=syncbot -# Optional MySQL TLS controls (provider-dependent) +# Optional TLS (provider-dependent) # DATABASE_TLS_ENABLED=true # DATABASE_SSL_CA_PATH=/etc/pki/tls/certs/ca-bundle.crt -# Option B — SQLite (forks / local): set backend and URL only +# Option B — MySQL (legacy): set backend and MySQL vars or DATABASE_URL +# 
DATABASE_BACKEND=mysql +# DATABASE_HOST=127.0.0.1 +# DATABASE_PORT=3306 +# DATABASE_USER=root +# DATABASE_PASSWORD=rootpass +# DATABASE_SCHEMA=syncbot + +# Option C — SQLite (forks / local): set backend and URL only # DATABASE_BACKEND=sqlite # DATABASE_URL=sqlite:///syncbot.db @@ -44,7 +53,7 @@ DATABASE_SCHEMA=syncbot # ENV_SLACK_CLIENT_ID=your-client-id # ENV_SLACK_CLIENT_SECRET=your-client-secret # ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write:user,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email -# OAuth state and installation data are stored in the same database (MySQL or SQLite). +# OAuth state and installation data are stored in the same database (PostgreSQL, MySQL, or SQLite). # ----------------------------------------------------------------------------- # Encryption (optional) diff --git a/README.md b/README.md index 12a570c..210bbfb 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ SyncBot ships with a full AWS SAM template (`infra/aws/template.yaml`) that prov |----------|---------|-----------------| | Compute | Lambda (128 MB) | 1M requests/month free | | API | API Gateway v1 | 1M calls/month free | -| Database | RDS MySQL (db.t3.micro) | 750 hrs/month free (12 months) | +| Database | RDS PostgreSQL or MySQL (db.t3.micro) | Engine-specific; see AWS free tier | OAuth and app data are stored in RDS. Media is uploaded directly to Slack (no runtime S3). SAM deploy uses an S3 artifact bucket for packaging only. @@ -46,7 +46,7 @@ OAuth and app data are stored in RDS. 
Media is uploaded directly to Slack (no ru |------|---------|---------| | **AWS SAM CLI** | latest | Build & deploy Lambda + infra | | **Docker** | latest | SAM uses a container to build the Lambda package | -| **MySQL client** *(optional)* | any | Run schema scripts against the DB | +| **psql client** *(optional)* | any | Ad-hoc RDS PostgreSQL checks | ### First-Time Deploy diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index d32f535..94a6a74 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -25,7 +25,7 @@ sequenceDiagram participant S as Slack API participant AG as API Gateway participant L as Lambda (SyncBot) - participant DB as RDS MySQL + participant DB as RDS participant SB as Slack API (Workspace B) U->>S: Posts message in #general @@ -81,7 +81,7 @@ flowchart TB FED["federation/"] end - subgraph Database["RDS MySQL"] + subgraph Database["RDS PostgreSQL or MySQL"] T1["workspaces"] T2["workspace_groups"] T2a["workspace_group_members"] diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index fc56e2f..74f0606 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -40,10 +40,11 @@ See [Swapping providers](#swapping-providers) for changing providers in a fork. ## Database backend -The app supports **MySQL** (default) or **SQLite**. See [INFRA_CONTRACT.md](INFRA_CONTRACT.md) for required variables per backend. **Pre-release:** DB flow assumes **fresh installs only**; schema is created at startup via Alembic. +The app supports **PostgreSQL** (default, including Aurora DSQL and RDS PostgreSQL), **MySQL** (legacy), and **SQLite**. See [INFRA_CONTRACT.md](INFRA_CONTRACT.md) for required variables per backend. **Pre-release:** DB flow assumes **fresh installs only**; schema is created at startup via Alembic. -- **MySQL:** Use for production and when using AWS/GCP templates (RDS, Cloud SQL). Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or `DATABASE_HOST` + `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. 
-- **SQLite:** Use for forks or local runs where you prefer no DB server. Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/syncbot.db`. Single-writer; ensure backups and file durability. AWS/GCP reference templates assume MySQL; for SQLite you deploy the app (e.g. container or Lambda with a writable volume) and set the env vars only. +- **PostgreSQL / Aurora DSQL (default):** Set `DATABASE_BACKEND=postgresql` (or rely on the app default) and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. The AWS SAM template parameter **`DatabaseEngine`** defaults to **`postgresql`** (new RDS PostgreSQL in stack, or existing host with the custom-resource setup). +- **MySQL:** Set `DATABASE_BACKEND=mysql` and either `DATABASE_URL` or the four host/user/password/schema vars. On AWS, choose **Advanced: legacy MySQL** in `./infra/aws/scripts/deploy.sh` or pass `DatabaseEngine=mysql` to `sam deploy`. +- **SQLite:** Use for forks or local runs where you prefer no DB server. Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/syncbot.db`. Single-writer; ensure backups and file durability. For SQLite on Lambda you need durable shared storage (e.g. EFS); the reference SAM template targets PostgreSQL/MySQL. --- @@ -89,6 +90,7 @@ For local, end-to-end deploys (bootstrap + build + deploy), use: The script: - prompts for stage (`test`/`prod`) and DB mode (new RDS vs existing host), +- defaults to **PostgreSQL** (`DatabaseEngine=postgresql`); optional advanced prompt for **legacy MySQL** (`DatabaseEngine=mysql`), - prompts for required secrets/credentials, - auto-detects bootstrap outputs (region, deploy bucket, suggested stack names) when available, - supports existing-RDS `public` or `private` network mode (with VPC subnet/security-group prompts for private mode), @@ -116,7 +118,7 @@ If bootstrap is missing, it can deploy bootstrap first. Use the bootstrap **DeploymentBucketName**. 
Set parameters (Stage, DB, Slack, etc.) when prompted. -3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `SLACK_CLIENT_ID` (Slack app Client ID from Basic Information → App Credentials), `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. +3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `SLACK_CLIENT_ID` (Slack app Client ID from Basic Information → App Credentials), `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. The SAM template defaults **`DatabaseEngine=postgresql`** (Aurora DSQL / RDS PostgreSQL). To deploy **legacy MySQL** from CI, extend the workflow `parameter-overrides` to include `DatabaseEngine=mysql` (or add a repository variable and wire it through). 4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). - The AWS workflow runs `pip-audit` against `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`, so dependency pins should be kept current. 
diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md index 71bb16a..d9802a7 100644 --- a/docs/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -455,6 +455,13 @@ This document outlines the improvements made to the SyncBot application and addi - GCP Terraform variable: `token_encryption_key_override` - **Admin/operator warning surface** — deploy helper scripts and deployment docs now explicitly warn that losing the token key requires workspace reinstall/re-authorization. +### 49. PostgreSQL / Aurora DSQL Parallel Backend (Completed) +- **Runtime** — Added `DATABASE_BACKEND=postgresql` (default), `psycopg2` + `postgresql+psycopg2://` URLs, `DATABASE_PORT` (default 5432), PostgreSQL-safe `CREATE DATABASE`, table drop/reset, and TLS via `sslmode`/`sslrootcert`. +- **AWS SAM** — `DatabaseEngine` parameter (`postgresql` default, `mysql` legacy); split `RDSInstanceMysql` / `RDSInstancePostgres`; Lambda env sets `DATABASE_BACKEND`, `DATABASE_PORT`, and `DATABASE_HOST` accordingly. +- **Custom resource** — `infra/aws/db_setup/handler.py` branches on `DatabaseEngine` for MySQL vs PostgreSQL user/database creation. +- **Deploy UX** — `./infra/aws/scripts/deploy.sh` defaults to PostgreSQL; **Advanced: legacy MySQL** toggles `DatabaseEngine=mysql`. `samconfig.toml` profiles pass `DatabaseEngine=postgresql` where applicable. +- **Docs/tests** — `INFRA_CONTRACT.md`, `DEPLOYMENT.md`, `README.md`, `.env.example` updated; `tests/conftest.py` defaults tests to `mysql` for compatibility; added PostgreSQL pool/required-vars tests and `tests/test_db_setup.py`. + ## Remaining Recommendations ### Low Priority diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 569df4d..6965ec0 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -27,18 +27,21 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | Variable | Description | |----------|-------------| -| `DATABASE_BACKEND` | `mysql` (default) or `sqlite`. 
| -| `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides legacy MySQL vars. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For MySQL, optional (if unset, legacy vars below are used). | -| `DATABASE_HOST` | MySQL hostname (IP or FQDN). Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `DATABASE_USER` | MySQL username. Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `DATABASE_PASSWORD` | MySQL password. Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `DATABASE_SCHEMA` | MySQL database/schema name (e.g. `syncbot`, `syncbot_prod`). Required when backend is `mysql` and `DATABASE_URL` is unset. | -| `DATABASE_TLS_ENABLED` | Optional MySQL TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | -| `DATABASE_SSL_CA_PATH` | Optional CA bundle path used when TLS is enabled (default `/etc/pki/tls/certs/ca-bundle.crt`). | +| `DATABASE_BACKEND` | `postgresql` (default), `mysql`, or `sqlite`. | +| `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | +| `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | +| `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. 
| +| `DATABASE_SSL_CA_PATH` | Optional CA bundle path when TLS is enabled (default `/etc/pki/tls/certs/ca-bundle.crt`). | -**SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. Caveats: single-writer behavior, file durability, and backup expectations are your responsibility. For production at scale, prefer MySQL. +**SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. -**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or leave unset) and either `DATABASE_URL` or the four legacy vars above. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time schema setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. +**PostgreSQL / Aurora DSQL (default):** Set `DATABASE_BACKEND=postgresql` (or rely on the default) and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. The AWS SAM template parameter `DatabaseEngine=postgresql` matches this backend. + +**MySQL (legacy):** Set `DATABASE_BACKEND=mysql` and either `DATABASE_URL` (`mysql+pymysql://...`) or the four host/user/password/schema vars. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. ### Required in production (non–local) @@ -79,13 +82,13 @@ The provider must deliver: Slack and DB credentials must be available as environment variables (or equivalent) at process start. No assumption of a specific secret store; provider chooses (e.g. Lambda env, Secret Manager, Parameter Store). 3. **Database** - **MySQL:** In non–local environments the app uses TLS; the provider must allow outbound TCP to the MySQL host (typically 3306). 
**SQLite:** No network; the app uses a local file. Single-writer; ensure backups and file durability for production use. + **PostgreSQL / MySQL:** In non–local environments the app uses TLS by default; allow outbound TCP to the DB host (typically **5432** for PostgreSQL, **3306** for MySQL). **SQLite:** No network; the app uses a local file. Single-writer; ensure backups and file durability for production use. 4. **Keep-warm / scheduled ping (optional but recommended)** To avoid cold-start latency, the app supports a periodic HTTP GET to a configurable path. The provider should support a scheduled job (e.g. CloudWatch Events, Cloud Scheduler) that hits the service on an interval (e.g. 5 minutes). 5. **Stateless execution** - The app is stateless; state lives in the configured database (MySQL or SQLite). Horizontal scaling is supported with MySQL as long as all instances share the same DB and env; SQLite is single-writer. + The app is stateless; state lives in the configured database (PostgreSQL, MySQL, or SQLite). Horizontal scaling is supported with PostgreSQL/MySQL as long as all instances share the same DB and env; SQLite is single-writer. ## CI Auth Model diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index 3fe3142..c0d078a 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -1,48 +1,74 @@ """ -Custom CloudFormation resource: create schema and app user on an existing MySQL (RDS) host. +Custom CloudFormation resource: create database and app user on an existing RDS host. -Run during stack create/update when ExistingDatabaseHost is set. Uses bootstrap credentials -to create the schema and a dedicated app user; the app password is read from the generated -Secrets Manager secret (created by the template) so the app Lambda can use it. +Supports MySQL (port 3306) and PostgreSQL / Aurora DSQL (port 5432). 
Uses bootstrap +credentials to create the database and a dedicated app user; the app password is read +from the generated Secrets Manager secret. """ import json -import os +import re + import boto3 +import psycopg2 import pymysql +from psycopg2 import sql as psql from pymysql.cursors import DictCursor + # CloudFormation custom resource response helper (no cfnresponse in Lambda by default for Python 3) def send(event, context, status, data=None, reason=None, physical_resource_id=None): + import urllib.error import urllib.request + pid = physical_resource_id or event.get("PhysicalResourceId") or event["LogicalResourceId"] - body = json.dumps({ - "Status": status, - "Reason": reason or f"See CloudWatch Log Stream: {context.log_stream_name}", - "PhysicalResourceId": pid, - "StackId": event["StackId"], - "RequestId": event["RequestId"], - "LogicalResourceId": event["LogicalResourceId"], - "Data": data or {}, - }).encode("utf-8") + log_ref = getattr(context, "log_stream_name", None) or "n/a" + body = json.dumps( + { + "Status": status, + "Reason": reason or f"See CloudWatch Log Stream: {log_ref}", + "PhysicalResourceId": pid, + "StackId": event["StackId"], + "RequestId": event["RequestId"], + "LogicalResourceId": event["LogicalResourceId"], + "Data": data or {}, + } + ).encode("utf-8") req = urllib.request.Request( event["ResponseURL"], data=body, method="PUT", headers={"Content-Type": "application/json"}, ) - with urllib.request.urlopen(req, timeout=30) as f: - f.read() + # Custom resource responses must reach CloudFormation or the stack hangs (delete/update failures). 
+ try: + with urllib.request.urlopen(req, timeout=60) as f: + f.read() + except urllib.error.HTTPError as e: + raise RuntimeError(f"CFN response HTTP {e.code}: {e.read()!r}") from e + except urllib.error.URLError as e: + raise RuntimeError(f"CFN response URL error: {e}") from e def handler(event, context): try: return _handler_impl(event, context) except Exception as e: - send(event, context, "FAILED", reason=f"Unhandled error: {e}") + try: + send(event, context, "FAILED", reason=f"Unhandled error: {e}") + except Exception as send_err: + raise RuntimeError( + f"Unhandled error in handler: {e}; failed to notify CloudFormation: {send_err}" + ) from e raise +def _safe_ident(name: str) -> str: + if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", name): + raise ValueError(f"Invalid identifier: {name}") + return name + + def _handler_impl(event, context): request_type = event.get("RequestType", "Create") props = event.get("ResourceProperties", {}) @@ -52,15 +78,19 @@ def _handler_impl(event, context): schema = (props.get("Schema") or "syncbot").strip() stage = (props.get("Stage") or "test").strip() secret_arn = (props.get("SecretArn") or "").strip() + database_engine = (props.get("DatabaseEngine") or "postgresql").strip().lower() if request_type == "Delete": - # Leave schema and user for manual cleanup if desired - send(event, context, "SUCCESS", {"Username": ""}, physical_resource_id=event.get("PhysicalResourceId", "n/a")) + # Must return the same PhysicalResourceId as Create; never use a placeholder. 
+ delete_pid = event.get("PhysicalResourceId") or event["LogicalResourceId"] + send(event, context, "SUCCESS", {"Username": ""}, physical_resource_id=delete_pid) return if not all([host, admin_user, admin_password, schema, stage, secret_arn]): send( - event, context, "FAILED", + event, + context, + "FAILED", reason="Missing Host, AdminUser, AdminPassword, Schema, Stage, or SecretArn", ) return @@ -73,14 +103,24 @@ def _handler_impl(event, context): return try: - setup_database( - host=host, - admin_user=admin_user, - admin_password=admin_password, - schema=schema, - app_username=app_username, - app_password=app_password, - ) + if database_engine == "mysql": + setup_database_mysql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + ) + else: + setup_database_postgresql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + ) except Exception as e: send(event, context, "FAILED", reason=f"Database setup failed: {e}") return @@ -95,7 +135,7 @@ def get_app_password(secret_arn: str) -> str: return (resp.get("SecretString") or "").strip() -def setup_database( +def setup_database_mysql( *, host: str, admin_user: str, @@ -104,7 +144,6 @@ def setup_database( app_username: str, app_password: str, ) -> None: - # Fail fast if RDS is unreachable (e.g. not publicly accessible or SG blocks Lambda) conn = pymysql.connect( host=host, user=admin_user, @@ -117,7 +156,6 @@ def setup_database( try: with conn.cursor() as cur: cur.execute(f"CREATE DATABASE IF NOT EXISTS `{schema}`") - # MySQL 5.7: CREATE USER ... 
IDENTIFIED BY; 8.0 supports IF NOT EXISTS cur.execute( "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", (app_username, app_password), @@ -127,3 +165,50 @@ def setup_database( conn.commit() finally: conn.close() + + +def setup_database_postgresql( + *, + host: str, + admin_user: str, + admin_password: str, + schema: str, + app_username: str, + app_password: str, +) -> None: + _safe_ident(schema) + _safe_ident(app_username) + conn = psycopg2.connect( + host=host, + user=admin_user, + password=admin_password, + port=5432, + dbname="postgres", + connect_timeout=15, + sslmode="require", + ) + conn.autocommit = True + try: + with conn.cursor() as cur: + cur.execute("SELECT 1 FROM pg_roles WHERE rolname = %s", (app_username,)) + if cur.fetchone() is None: + q = psql.SQL("CREATE ROLE {name} WITH LOGIN PASSWORD %s").format( + name=psql.Identifier(app_username), + ) + cur.execute(q, (app_password,)) + else: + q = psql.SQL("ALTER ROLE {name} WITH LOGIN PASSWORD %s").format( + name=psql.Identifier(app_username), + ) + cur.execute(q, (app_password,)) + + cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (schema,)) + if cur.fetchone() is None: + cur.execute( + psql.SQL("CREATE DATABASE {db} OWNER {owner}").format( + db=psql.Identifier(schema), + owner=psql.Identifier(app_username), + ) + ) + finally: + conn.close() diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt index 75650ed..f4d7528 100644 --- a/infra/aws/db_setup/requirements.txt +++ b/infra/aws/db_setup/requirements.txt @@ -1 +1,2 @@ pymysql==1.1.2 +psycopg2-binary==2.9.11 diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index a1dcfa5..b7613bb 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -211,14 +211,19 @@ STACK_NAME="$(prompt_default "App stack name" "$DEFAULT_STACK")" echo echo "Database mode:" -echo " 1) Create new RDS in stack" -echo " 2) Use existing RDS host (deploy creates schema/app user)" +echo " 1) Create 
new RDS in stack (PostgreSQL by default)" +echo " 2) Use existing RDS / Aurora DSQL host (deploy creates DB and app user)" DB_MODE="$(prompt_default "Choose 1 or 2" "1")" if [[ "$DB_MODE" != "1" && "$DB_MODE" != "2" ]]; then echo "Error: invalid database mode." >&2 exit 1 fi +DATABASE_ENGINE="postgresql" +if prompt_yes_no "Advanced: use legacy MySQL RDS instead of PostgreSQL (Aurora DSQL / RDS PG)?" "n"; then + DATABASE_ENGINE="mysql" +fi + echo SLACK_SIGNING_SECRET="$(prompt_secret "SlackSigningSecret")" SLACK_CLIENT_SECRET="$(prompt_secret "SlackClientSecret")" @@ -322,6 +327,7 @@ echo "Stage: $STAGE" echo "Deploy bucket: $S3_BUCKET" if [[ "$DB_MODE" == "2" ]]; then echo "DB mode: existing host" + echo "DB engine: $DATABASE_ENGINE" echo "DB host: $EXISTING_DATABASE_HOST" echo "DB network: $EXISTING_DATABASE_NETWORK_MODE" if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then @@ -331,6 +337,7 @@ if [[ "$DB_MODE" == "2" ]]; then echo "DB schema: $DATABASE_SCHEMA" else echo "DB mode: create new RDS" + echo "DB engine: $DATABASE_ENGINE" echo "DB user: $DATABASE_USER" echo "DB schema: $DATABASE_SCHEMA" fi @@ -357,6 +364,7 @@ sam build -t "$APP_TEMPLATE" --use-container PARAMS=( "Stage=$STAGE" + "DatabaseEngine=$DATABASE_ENGINE" "SlackSigningSecret=$SLACK_SIGNING_SECRET" "SlackClientSecret=$SLACK_CLIENT_SECRET" "DatabaseSchema=$DATABASE_SCHEMA" diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index b6c343a..01a0d05 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -2,8 +2,8 @@ AWSTemplateFormatVersion: "2010-09-09" Transform: AWS::Serverless-2016-10-31 Description: > SyncBot - Slack app that syncs posts and replies across workspaces. - Free-tier compatible: Lambda, API Gateway, RDS db.t3.micro. - OAuth and app data use RDS (MySQL); media is uploaded directly to Slack. + Free-tier compatible: Lambda, API Gateway, RDS PostgreSQL or MySQL (db.t3.micro). + OAuth and app data use RDS; media is uploaded directly to Slack. 
SAM deploy uses an S3 artifact bucket for packaging only (not runtime). Template lives under infra/aws; CodeUri points at repo-root syncbot/. @@ -33,6 +33,16 @@ Parameters: - test - prod + DatabaseEngine: + Description: > + SQL engine for new or existing RDS host. Use postgresql for Aurora DSQL or RDS PostgreSQL; + use mysql for legacy RDS MySQL. + Type: String + Default: postgresql + AllowedValues: + - postgresql + - mysql + # --- Slack --- SlackSigningSecret: @@ -104,7 +114,7 @@ Parameters: ExistingDatabaseLambdaSecurityGroupId: Description: > Security group ID for Lambda VPC attachment when using an existing private - database host. This security group must be allowed to connect to the DB on port 3306. + database host. This security group must be allowed to connect to the DB (3306 MySQL, 5432 PostgreSQL). Ignored unless ExistingDatabaseHost is set and ExistingDatabaseNetworkMode=private. Type: String Default: "" @@ -188,6 +198,10 @@ Parameters: Conditions: CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] UseExistingDatabase: !Not [!Equals [!Ref ExistingDatabaseHost, ""]] + IsMysqlEngine: !Equals [!Ref DatabaseEngine, mysql] + IsPostgresqlEngine: !Equals [!Ref DatabaseEngine, postgresql] + CreateDatabaseMysql: !And [!Condition CreateDatabase, !Condition IsMysqlEngine] + CreateDatabasePostgresql: !And [!Condition CreateDatabase, !Condition IsPostgresqlEngine] UseExistingDatabasePrivateVpc: !And - !Condition UseExistingDatabase - !Equals [!Ref ExistingDatabaseNetworkMode, "private"] @@ -305,26 +319,41 @@ Resources: FromPort: 3306 ToPort: 3306 CidrIp: !Ref AllowedDBCidr - Description: "MySQL access (Lambda connects over public internet)" + Description: "MySQL (if DatabaseEngine=mysql)" + - IpProtocol: tcp + FromPort: 5432 + ToPort: 5432 + CidrIp: !Ref AllowedDBCidr + Description: "PostgreSQL (if DatabaseEngine=postgresql)" Tags: - Key: Name Value: !Sub "syncbot-${Stage}-rds-sg" # ============================================================ - # RDS MySQL 
Database + # RDS MySQL / PostgreSQL # ============================================================ - RDSParameterGroup: + RDSParameterGroupMysql: Type: AWS::RDS::DBParameterGroup - Condition: CreateDatabase + Condition: CreateDatabaseMysql Properties: Family: mysql8.0 - Description: !Sub "SyncBot ${Stage} - enforces SSL connections" + Description: !Sub "SyncBot ${Stage} MySQL - SSL" Parameters: require_secure_transport: "1" Tags: - Key: Name - Value: !Sub "syncbot-${Stage}-db-params" + Value: !Sub "syncbot-${Stage}-db-params-mysql" + + RDSParameterGroupPostgres: + Type: AWS::RDS::DBParameterGroup + Condition: CreateDatabasePostgresql + Properties: + Family: postgres16 + Description: !Sub "SyncBot ${Stage} PostgreSQL" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-params-pg" DBSubnetGroup: Type: AWS::RDS::DBSubnetGroup @@ -338,13 +367,13 @@ Resources: - Key: Name Value: !Sub "syncbot-${Stage}-db-subnet-group" - RDSInstance: + RDSInstanceMysql: Type: AWS::RDS::DBInstance - Condition: CreateDatabase + Condition: CreateDatabaseMysql DeletionPolicy: Snapshot UpdateReplacePolicy: Snapshot Properties: - DBInstanceIdentifier: !Sub "syncbot-${Stage}" + DBInstanceIdentifier: !Sub "syncbot-${Stage}-mysql" DBInstanceClass: !Ref DatabaseInstanceClass Engine: mysql EngineVersion: "8.0" @@ -357,7 +386,37 @@ Resources: PubliclyAccessible: true MultiAZ: false DBSubnetGroupName: !Ref DBSubnetGroup - DBParameterGroupName: !Ref RDSParameterGroup + DBParameterGroupName: !Ref RDSParameterGroupMysql + VPCSecurityGroups: + - !Ref RDSSecurityGroup + BackupRetentionPeriod: 7 + PreferredBackupWindow: "03:00-04:00" + PreferredMaintenanceWindow: "sun:04:00-sun:05:00" + DeletionProtection: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-mysql" + + RDSInstancePostgres: + Type: AWS::RDS::DBInstance + Condition: CreateDatabasePostgresql + DeletionPolicy: Snapshot + UpdateReplacePolicy: Snapshot + Properties: + DBInstanceIdentifier: !Sub "syncbot-${Stage}-pg" + 
DBInstanceClass: !Ref DatabaseInstanceClass + Engine: postgres + EngineVersion: "16.6" + MasterUsername: !Ref DatabaseUser + MasterUserPassword: !Ref DatabasePassword + DBName: !Ref DatabaseSchema + AllocatedStorage: 20 + StorageType: gp2 + StorageEncrypted: true + PubliclyAccessible: true + MultiAZ: false + DBSubnetGroupName: !Ref DBSubnetGroup + DBParameterGroupName: !Ref RDSParameterGroupPostgres VPCSecurityGroups: - !Ref RDSSecurityGroup BackupRetentionPeriod: 7 @@ -366,7 +425,7 @@ Resources: DeletionProtection: true Tags: - Key: Name - Value: !Sub "syncbot-${Stage}-db" + Value: !Sub "syncbot-${Stage}-db-pg" TokenEncryptionKeySecret: Type: AWS::SecretsManager::Secret @@ -432,6 +491,7 @@ Resources: Schema: !Ref DatabaseSchema Stage: !Ref Stage SecretArn: !Ref AppDbCredentialsSecret + DatabaseEngine: !Ref DatabaseEngine # ============================================================ # Lambda Function @@ -492,10 +552,18 @@ Resources: - StagesMap - !Ref Stage - SlackClientID + DATABASE_BACKEND: !Ref DatabaseEngine + DATABASE_PORT: !If + - IsMysqlEngine + - "3306" + - "5432" DATABASE_HOST: !If - - CreateDatabase - - !GetAtt RDSInstance.Endpoint.Address + - UseExistingDatabase - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address DATABASE_USER: !If - UseExistingDatabase - !GetAtt AppDbSetup.Username @@ -616,19 +684,28 @@ Outputs: DatabaseHostInUse: Description: Database host the Lambda is configured to connect to Value: !If - - CreateDatabase - - !GetAtt RDSInstance.Endpoint.Address + - UseExistingDatabase - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address RDSEndpoint: Condition: CreateDatabase - Description: RDS MySQL endpoint address (only when RDS is created by this stack) - Value: !GetAtt RDSInstance.Endpoint.Address + Description: RDS endpoint (MySQL or PostgreSQL) when 
created by this stack + Value: !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address RDSPort: Condition: CreateDatabase - Description: RDS MySQL port (only when RDS is created by this stack) - Value: !GetAtt RDSInstance.Endpoint.Port + Description: RDS port when created by this stack + Value: !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Port + - !GetAtt RDSInstancePostgres.Endpoint.Port VpcId: Condition: CreateDatabase diff --git a/poetry.lock b/poetry.lock index 47422e1..2b0a970 100644 --- a/poetry.lock +++ b/poetry.lock @@ -615,6 +615,83 @@ files = [ dev = ["pre-commit", "tox"] testing = ["coverage", "pytest", "pytest-benchmark"] +[[package]] +name = "psycopg2-binary" +version = "2.9.11" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6fe6b47d0b42ce1c9f1fa3e35bb365011ca22e39db37074458f27921dca40f2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c0e4262e089516603a09474ee13eabf09cb65c332277e39af68f6233911087"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:763c93ef1df3da6d1a90f86ea7f3f806dc06b21c198fa87c3c25504abec9404a"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:366df99e710a2acd90efed3764bb1e28df6c675d33a7fb40df9b7281694432ee"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = 
"sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:92e3b669236327083a2e33ccfa0d320dd01b9803b3e14dd986a4fc54aa00f4e1"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e0deeb03da539fa3577fcb0b3f2554a97f7e5477c246098dbb18091a4a01c16f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b52a3f9bb540a3e4ec0f6ba6d31339727b2950c9772850d6545b7eae0b9d7c5"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:db4fd476874ccfdbb630a54426964959e58da4c61c9feba73e6094d51303d7d8"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47f212c1d3be608a12937cc131bd85502954398aaa1320cb4c14421a0ffccf4c"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e35b7abae2b0adab776add56111df1735ccc71406e56203515e228a8dc07089f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fcf21be3ce5f5659daefd2b3b3b6e4727b028221ddc94e6c1523425579664747"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:9bd81e64e8de111237737b29d68039b9c813bdf520156af36d26819c9a979e5f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:32770a4d666fbdafab017086655bcddab791d7cb260a16679cc5a7338b64343b"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_x86_64.whl", hash = 
"sha256:c3cb3a676873d7506825221045bd70e0427c905b9c8ee8d6acd70cfcbd6e576d"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:4012c9c954dfaccd28f94e84ab9f94e12df76b4afb22331b1f0d3154893a6316"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20e7fb94e20b03dcc783f76c0865f9da39559dcc0c28dd1a3fce0d01902a6b9c"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bdab48575b6f870f465b397c38f1b415520e9879fdf10a53ee4f49dcbdf8a21"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9d3a9edcfbe77a3ed4bc72836d466dfce4174beb79eda79ea155cc77237ed9e8"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:44fc5c2b8fa871ce7f0023f619f1349a0aa03a0857f2c96fbc01c657dcbbdb49"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9c55460033867b4622cda1b6872edf445809535144152e5d14941ef591980edf"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2d11098a83cca92deaeaed3d58cfd150d49b3b06ee0d0852be466bf87596899e"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:691c807d94aecfbc76a14e1408847d59ff5b5906a04a23e12a89007672b9e819"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b81627b691f29c4c30a8f322546ad039c40c328373b11dff7490a3e1b517855"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:b637d6d941209e8d96a072d7977238eea128046effbf37d1d8b2c0764750017d"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:41360b01c140c2a03d346cec3280cf8a71aa07d94f3b1509fa0161c366af66b4"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:875039274f8a2361e5207857899706da840768e2a775bf8c65e82f60b197df02"}, +] 
+ [[package]] name = "pycparser" version = "2.21" @@ -927,4 +1004,4 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "1eafe9bbbd2df990fb90980ddbf1db11e29fb5ae52b6455c9353cf8cda1db894" +content-hash = "1a838a06c2d452cf5cd57fa44cb4c195fc2fa1e0c75bce56b8b53312b2366ebb" diff --git a/pyproject.toml b/pyproject.toml index 55eb663..3ca7590 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,6 +12,7 @@ python-dotenv = "^1.2.0" slack-bolt = "^1.27.0" sqlalchemy = "^2.0" pymysql = "^1.1.2" +psycopg2-binary = "^2.9" cryptography = "^46.0.0" requests = "^2.32.0" @@ -22,7 +23,7 @@ pytest = "^9.0" [tool.pytest.ini_options] testpaths = ["tests"] -pythonpath = ["syncbot"] +pythonpath = ["syncbot", "infra/aws/db_setup"] [tool.ruff] target-version = "py312" diff --git a/samconfig.toml b/samconfig.toml index 0990350..e65ab67 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -44,7 +44,7 @@ s3_prefix = "syncbot-test" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=test ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" +parameter_overrides = "Stage=test DatabaseEngine=postgresql ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" [test-existing-rds.build.parameters] template_file = "infra/aws/template.yaml" @@ -57,7 +57,7 @@ s3_prefix = "syncbot-test" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=test ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" +parameter_overrides = "Stage=test DatabaseEngine=postgresql ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" [prod-new-rds.build.parameters] template_file = "infra/aws/template.yaml" @@ -70,7 
+70,7 @@ s3_prefix = "syncbot-prod" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=prod ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" +parameter_overrides = "Stage=prod DatabaseEngine=postgresql ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" [prod-existing-rds.build.parameters] template_file = "infra/aws/template.yaml" @@ -83,7 +83,7 @@ s3_prefix = "syncbot-prod" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=prod ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" +parameter_overrides = "Stage=prod DatabaseEngine=postgresql ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" [test.deploy.parameters] stack_name = "syncbot-test" diff --git a/syncbot/constants.py b/syncbot/constants.py index 7c782c0..918a2eb 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -28,12 +28,13 @@ TOKEN_ENCRYPTION_KEY = "TOKEN_ENCRYPTION_KEY" REQUIRE_ADMIN = "REQUIRE_ADMIN" -# Database: backend-agnostic (mysql or sqlite) +# Database: backend-agnostic (postgresql, mysql, or sqlite) DATABASE_BACKEND = "DATABASE_BACKEND" DATABASE_URL = "DATABASE_URL" -# MySQL-only vars (used when DATABASE_URL is unset and backend is mysql) +# Network SQL backends (used when DATABASE_URL is unset) DATABASE_HOST = "DATABASE_HOST" +DATABASE_PORT = "DATABASE_PORT" DATABASE_USER = "DATABASE_USER" DATABASE_PASSWORD = "DATABASE_PASSWORD" DATABASE_SCHEMA = "DATABASE_SCHEMA" @@ -97,8 +98,11 @@ def _has_real_bot_token() -> bool: # --------------------------------------------------------------------------- def get_database_backend() -> str: - """Return 'mysql' or 'sqlite'. 
Defaults to 'mysql' when unset for backward compatibility.""" - return os.environ.get(DATABASE_BACKEND, "mysql").lower().strip() or "mysql" + """Return ``postgresql``, ``mysql``, or ``sqlite``. + + Defaults to ``postgresql`` (Aurora DSQL / RDS PostgreSQL) when unset. + """ + return os.environ.get(DATABASE_BACKEND, "postgresql").lower().strip() or "postgresql" def _env_bool(name: str, default: bool) -> bool: @@ -110,7 +114,7 @@ def _env_bool(name: str, default: bool) -> bool: def database_tls_enabled() -> bool: - """Return True when MySQL TLS should be used. + """Return True when MySQL/PostgreSQL TLS should be used. Defaults: - local dev: disabled @@ -131,7 +135,7 @@ def get_required_db_vars() -> list: backend = get_database_backend() if backend == "sqlite": return [DATABASE_URL] - # mysql: require URL or legacy host/user/password/schema + # mysql / postgresql: require URL or host/user/password/schema if os.environ.get(DATABASE_URL): return [] # URL is enough return [ @@ -179,7 +183,7 @@ def validate_config() -> None: In production this raises immediately so the Lambda fails on cold-start rather than silently misbehaving. In local development it only warns. - DB requirements depend on DATABASE_BACKEND (mysql vs sqlite). + DB requirements depend on DATABASE_BACKEND (postgresql, mysql, or sqlite). 
""" required = list(_REQUIRED_ALWAYS_NON_DB) + list(get_required_db_vars()) if not LOCAL_DEVELOPMENT: diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index d7907a3..f5ecfcc 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -47,6 +47,14 @@ class DatabaseField: _ALEMBIC_SCRIPT_LOCATION = _PROJECT_ROOT / "db" / "alembic" +def _mysql_port() -> str: + return os.environ.get(constants.DATABASE_PORT, "3306") + + +def _pg_port() -> str: + return os.environ.get(constants.DATABASE_PORT, "5432") + + def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: """Build MySQL URL and connect_args from DATABASE_* env vars.""" host = os.environ[constants.DATABASE_HOST] @@ -54,7 +62,8 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") path = f"/{schema}" if include_schema else "" - db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306{path}?charset=utf8mb4" + port = _mysql_port() + db_url = f"mysql+pymysql://{user}:{passwd}@{host}:{port}{path}?charset=utf8mb4" connect_args: dict = {} if constants.database_tls_enabled(): ca_path = constants.database_ssl_ca_path() @@ -66,6 +75,43 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: return db_url, connect_args +def _build_postgresql_url(include_schema: bool = False) -> tuple[str, dict]: + """Build PostgreSQL URL and connect_args from DATABASE_* env vars (RDS / Aurora DSQL).""" + host = os.environ[constants.DATABASE_HOST] + user = quote_plus(os.environ[constants.DATABASE_USER]) + passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") + port = _pg_port() + # Target database: schema name maps to PostgreSQL database name (same as MySQL DB name). 
+ dbname = schema if include_schema else "postgres" + db_url = f"postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{dbname}" + connect_args: dict = {} + if constants.database_tls_enabled(): + ca_path = constants.database_ssl_ca_path() + connect_args["sslmode"] = "verify-full" + connect_args["sslrootcert"] = ca_path + return db_url, connect_args + + +def _network_sql_connect_args_from_url() -> dict: + """TLS connect_args when using DATABASE_URL for MySQL or PostgreSQL.""" + connect_args: dict = {} + if not constants.database_tls_enabled(): + return connect_args + backend = constants.get_database_backend() + ca_path = constants.database_ssl_ca_path() + if backend == "mysql": + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + connect_args["ssl"] = ssl_ctx + elif backend == "postgresql": + connect_args["sslmode"] = "verify-full" + connect_args["sslrootcert"] = ca_path + return connect_args + + def _get_database_url_and_args(schema: str = None) -> tuple[str, dict]: """Return (url, connect_args) for the configured backend. 
Dialect-aware.""" backend = constants.get_database_backend() @@ -78,18 +124,15 @@ def _get_database_url_and_args(schema: str = None) -> tuple[str, dict]: url = f"sqlite:///{_PROJECT_ROOT / path_part}" connect_args = {"check_same_thread": False} return url, connect_args + if backend == "postgresql": + if os.environ.get(constants.DATABASE_URL): + url = os.environ[constants.DATABASE_URL] + return url, _network_sql_connect_args_from_url() + return _build_postgresql_url(include_schema=True) # mysql if os.environ.get(constants.DATABASE_URL): url = os.environ[constants.DATABASE_URL] - connect_args = {} - if constants.database_tls_enabled(): - ca_path = constants.database_ssl_ca_path() - try: - ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): - ssl_ctx = ssl.create_default_context() - connect_args["ssl"] = ssl_ctx - return url, connect_args + return url, _network_sql_connect_args_from_url() return _build_mysql_url(include_schema=True) @@ -97,20 +140,49 @@ def _is_sqlite(engine) -> bool: return engine.dialect.name == "sqlite" +def _is_network_sql_backend() -> bool: + return constants.get_database_backend() in ("mysql", "postgresql") + + def _ensure_database_exists() -> None: - """Create the configured schema if it does not already exist (MySQL only).""" - if constants.get_database_backend() != "mysql": + """Create the configured database/schema if missing (MySQL or PostgreSQL).""" + backend = constants.get_database_backend() + if backend not in ("mysql", "postgresql"): return if os.environ.get(constants.DATABASE_URL): return # URL already points at a database schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") - url_no_db, connect_args = _build_mysql_url(include_schema=False) - engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) + if backend == "mysql": + url_no_db, connect_args = _build_mysql_url(include_schema=False) + engine_no_db = create_engine(url_no_db, connect_args=connect_args, 
pool_pre_ping=True) + try: + with engine_no_db.begin() as conn: + conn.execute(text(f"CREATE DATABASE IF NOT EXISTS `{schema}` CHARACTER SET utf8mb4")) + finally: + engine_no_db.dispose() + return + + # postgresql: connect to maintenance DB, CREATE DATABASE if needed + url_admin, connect_args = _build_postgresql_url(include_schema=False) + safe = "".join(c for c in schema if c.isalnum() or c == "_") + if not safe or safe != schema: + raise ValueError(f"Invalid DATABASE_SCHEMA for PostgreSQL (use letters, digits, underscore): {schema}") + engine_admin = create_engine( + url_admin, + connect_args=connect_args, + pool_pre_ping=True, + isolation_level="AUTOCOMMIT", + ) try: - with engine_no_db.begin() as conn: - conn.execute(text(f"CREATE DATABASE IF NOT EXISTS `{schema}` CHARACTER SET utf8mb4")) + with engine_admin.connect() as conn: + exists = conn.execute( + text("SELECT 1 FROM pg_database WHERE datname = :n"), + {"n": schema}, + ).scalar() + if exists is None: + conn.execute(text(f'CREATE DATABASE "{safe}"')) finally: - engine_no_db.dispose() + engine_admin.dispose() def _alembic_config(): @@ -132,7 +204,7 @@ def _run_alembic_upgrade() -> None: def initialize_database() -> None: """Initialize schema via Alembic migrations (fresh install only; pre-release). - Ensures DB exists (MySQL only), then runs Alembic upgrade head. + Ensures DB exists (MySQL/PostgreSQL), then runs Alembic upgrade head. """ for attempt in range(1, _DB_INIT_MAX_ATTEMPTS + 1): try: @@ -154,15 +226,27 @@ def initialize_database() -> None: def _drop_all_tables_dialect_aware(engine) -> None: - """Drop all tables in the current schema. MySQL: information_schema + FK off; SQLite: metadata reflect + drop.""" + """Drop all tables in the current schema. 
MySQL / PostgreSQL / SQLite dialect-aware.""" if _is_sqlite(engine): from sqlalchemy import MetaData + meta = MetaData() meta.reflect(bind=engine) with engine.begin() as conn: for table in reversed(meta.sorted_tables): table.drop(conn, checkfirst=True) return + if engine.dialect.name == "postgresql": + with engine.begin() as conn: + result = conn.execute( + text( + "SELECT tablename FROM pg_tables " + "WHERE schemaname = 'public' ORDER BY tablename" + ) + ) + for (table_name,) in result: + conn.execute(text(f'DROP TABLE IF EXISTS "{table_name}" CASCADE')) + return with engine.begin() as conn: conn.execute(text("SET FOREIGN_KEY_CHECKS = 0")) result = conn.execute( @@ -194,7 +278,7 @@ def drop_and_init_db() -> None: db_url, connect_args=connect_args, poolclass=pool.NullPool if constants.get_database_backend() == "sqlite" else pool.QueuePool, - pool_pre_ping=constants.get_database_backend() == "mysql", + pool_pre_ping=_is_network_sql_backend(), ) _drop_all_tables_dialect_aware(engine) @@ -212,12 +296,16 @@ def drop_and_init_db() -> None: def get_engine(echo: bool = False, schema: str = None): """Return the global SQLAlchemy engine, creating it on first call. - Uses QueuePool with pool_pre_ping for MySQL; NullPool for SQLite. + Uses QueuePool with pool_pre_ping for MySQL/PostgreSQL; NullPool for SQLite. 
""" global GLOBAL_ENGINE, GLOBAL_SCHEMA backend = constants.get_database_backend() - target_schema = (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) if backend == "mysql" else "" + target_schema = ( + (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) + if backend in ("mysql", "postgresql") + else "" + ) cache_key = target_schema or backend if cache_key == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None: diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index d3631ee..ea3a34b 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -5,6 +5,7 @@ cryptography==46.0.5 ; python_version >= "3.12" and python_version < "4.0" greenlet==3.1.1 ; python_version >= "3.12" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" idna==3.11 ; python_version >= "3.12" and python_version < "4.0" pycparser==2.23 ; python_version >= "3.12" and python_version < "4.0" +psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" python-dotenv==1.2.1 ; python_version >= "3.12" and python_version < "4.0" requests==2.32.5 ; python_version >= "3.12" and python_version < "4.0" diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..713fc4c --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,6 @@ +"""Pytest configuration: default DB backend for unit tests (no live DB required).""" + +import os + +# Unit tests use MySQL-style env vars without a real server; keep mysql backend. 
+os.environ.setdefault("DATABASE_BACKEND", "mysql") diff --git a/tests/test_db.py b/tests/test_db.py index db95a99..3cb9dd5 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -107,6 +107,37 @@ def test_engine_uses_queue_pool_mysql(self): db_mod.GLOBAL_ENGINE = old_engine db_mod.GLOBAL_SCHEMA = old_schema + @patch.dict( + os.environ, + { + "DATABASE_BACKEND": "postgresql", + "DATABASE_HOST": "localhost", + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "test", + "DATABASE_SCHEMA": "syncbot", + }, + clear=False, + ) + def test_engine_uses_queue_pool_postgresql(self): + from sqlalchemy.pool import QueuePool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = get_engine(schema="test_schema_unique_pg") + assert isinstance(engine.pool, QueuePool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + @patch.dict( os.environ, { @@ -189,3 +220,35 @@ def test_get_required_db_vars_sqlite(self): required = get_required_db_vars() assert required == ["DATABASE_URL"] + + def test_get_required_db_vars_postgresql_without_url(self): + with patch.dict( + os.environ, + {"DATABASE_BACKEND": "postgresql"}, + clear=False, + ): + if "DATABASE_URL" in os.environ: + del os.environ["DATABASE_URL"] + from constants import get_required_db_vars + + required = get_required_db_vars() + assert "DATABASE_HOST" in required + assert "DATABASE_USER" in required + assert "DATABASE_PASSWORD" in required + assert "DATABASE_SCHEMA" in required + + def test_default_database_backend_is_postgresql(self): + import importlib + + import constants as c + + old = os.environ.pop("DATABASE_BACKEND", None) + try: + importlib.reload(c) + assert c.get_database_backend() == "postgresql" + finally: + if old is not None: + os.environ["DATABASE_BACKEND"] = old + else: + 
os.environ.setdefault("DATABASE_BACKEND", "mysql") + importlib.reload(c) diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py new file mode 100644 index 0000000..e51b479 --- /dev/null +++ b/tests/test_db_setup.py @@ -0,0 +1,75 @@ +"""Unit tests for infra/aws/db_setup/handler.py (MySQL vs PostgreSQL branches).""" + +from unittest.mock import MagicMock, patch + +import pytest + + +@pytest.fixture +def cfn_create_event(): + return { + "RequestType": "Create", + "ResponseURL": "https://example.invalid/", + "StackId": "arn:aws:cloudformation:us-east-1:123:stack/x", + "RequestId": "req", + "LogicalResourceId": "AppDbSetup", + "ResourceProperties": { + "Host": "db.example.com", + "AdminUser": "admin", + "AdminPassword": "adminpw", + "Schema": "syncbot_test", + "Stage": "test", + "SecretArn": "arn:aws:secretsmanager:us-east-1:123:secret:x", + "DatabaseEngine": "mysql", + }, + } + + +def test_handler_calls_mysql_setup(cfn_create_event): + with ( + patch("handler.send") as mock_send, + patch("handler.get_app_password", return_value="apppw"), + patch("handler.setup_database_mysql") as mock_mysql, + patch("handler.setup_database_postgresql") as mock_pg, + ): + import handler + + handler._handler_impl(cfn_create_event, MagicMock()) + mock_mysql.assert_called_once() + mock_pg.assert_not_called() + assert mock_send.call_args[0][2] == "SUCCESS" + + +def test_handler_delete_uses_physical_resource_id(): + """Delete must echo PhysicalResourceId from Create; never a placeholder.""" + delete_event = { + "RequestType": "Delete", + "ResponseURL": "https://example.invalid/", + "StackId": "arn:aws:cloudformation:us-east-1:123:stack/x", + "RequestId": "req", + "LogicalResourceId": "AppDbSetup", + "PhysicalResourceId": "syncbot_test", + } + with patch("handler.send") as mock_send: + import handler + + handler._handler_impl(delete_event, MagicMock()) + mock_send.assert_called_once() + assert mock_send.call_args[0][2] == "SUCCESS" + assert mock_send.call_args[1]["physical_resource_id"] 
== "syncbot_test" + + +def test_handler_calls_postgresql_setup(cfn_create_event): + cfn_create_event["ResourceProperties"]["DatabaseEngine"] = "postgresql" + with ( + patch("handler.send") as mock_send, + patch("handler.get_app_password", return_value="apppw"), + patch("handler.setup_database_mysql") as mock_mysql, + patch("handler.setup_database_postgresql") as mock_pg, + ): + import handler + + handler._handler_impl(cfn_create_event, MagicMock()) + mock_pg.assert_called_once() + mock_mysql.assert_not_called() + assert mock_send.call_args[0][2] == "SUCCESS" From ad9739b86a0f8d5f9cde99d622f331f39f657055 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 23 Mar 2026 22:31:14 -0500 Subject: [PATCH 20/45] Deploy script enhancements and debugging. Added deploy scripts for GCP. Support for Windows through WSL. Supporting existing RDS instances better. Enhaced automation and error checking during deploy. Bug fixes during deploy including Python deps. Bug fixes to OAuth scopes. Added log level support. 
--- .env.example | 31 +- .github/workflows/ci.yml | 40 + .github/workflows/deploy-aws.yml | 28 +- .gitignore | 4 + README.md | 321 +---- db/alembic.ini => alembic.ini | 5 +- deploy.ps1 | 235 ++++ deploy.sh | 361 +++++ docs/ARCHITECTURE.md | 2 + docs/DEPLOYMENT.md | 366 ++--- docs/IMPROVEMENTS.md | 10 +- docs/INFRA_CONTRACT.md | 18 +- docs/USER_GUIDE.md | 2 + infra/aws/db_setup/handler.py | 165 ++- infra/aws/db_setup/requirements.txt | 2 + infra/aws/scripts/deploy.sh | 1226 ++++++++++++++++- infra/aws/scripts/print-bootstrap-outputs.sh | 5 +- infra/aws/template.bootstrap.yaml | 4 +- infra/aws/template.yaml | 255 ++-- infra/aws/tests/test_sam_template_validate.py | 37 + infra/gcp/README.md | 5 +- infra/gcp/main.tf | 63 +- infra/gcp/scripts/deploy.sh | 619 +++++++++ infra/gcp/scripts/print-bootstrap-outputs.sh | 6 +- infra/gcp/tests/test_terraform_validate.py | 53 + infra/gcp/variables.tf | 28 +- pyproject.toml | 4 +- samconfig.toml | 83 +- slack-manifest.json | 88 ++ slack-manifest.yaml | 79 -- syncbot/constants.py | 13 +- syncbot/db/__init__.py | 14 +- {db => syncbot/db}/alembic/env.py | 18 +- {db => syncbot/db}/alembic/script.py.mako | 0 .../db}/alembic/versions/001_baseline.py | 0 syncbot/helpers/oauth.py | 21 +- syncbot/requirements.txt | 1 + syncbot/slack_manifest_scopes.py | 63 + tests/test_db.py | 4 +- tests/test_deploy_script_syntax.py | 25 + tests/test_oauth.py | 43 +- tests/test_slack_manifest_scopes.py | 62 + 42 files changed, 3544 insertions(+), 865 deletions(-) create mode 100644 .github/workflows/ci.yml rename db/alembic.ini => alembic.ini (84%) create mode 100644 deploy.ps1 create mode 100755 deploy.sh create mode 100644 infra/aws/tests/test_sam_template_validate.py create mode 100755 infra/gcp/scripts/deploy.sh create mode 100644 infra/gcp/tests/test_terraform_validate.py create mode 100644 slack-manifest.json delete mode 100644 slack-manifest.yaml rename {db => syncbot/db}/alembic/env.py (72%) rename {db => syncbot/db}/alembic/script.py.mako (100%) 
rename {db => syncbot/db}/alembic/versions/001_baseline.py (100%) create mode 100644 syncbot/slack_manifest_scopes.py create mode 100644 tests/test_deploy_script_syntax.py create mode 100644 tests/test_slack_manifest_scopes.py diff --git a/.env.example b/.env.example index 6861d36..629a19f 100644 --- a/.env.example +++ b/.env.example @@ -8,25 +8,25 @@ # For native Python development, source it: source .env or export $(cat .env | xargs) # ----------------------------------------------------------------------------- -# Database (postgresql, mysql, or sqlite) — pre-release: fresh installs only +# Database (mysql, postgresql, or sqlite) — pre-release: fresh installs only # ----------------------------------------------------------------------------- -# Option A — PostgreSQL / Aurora DSQL (default): legacy vars or DATABASE_URL -DATABASE_BACKEND=postgresql +# Option A — MySQL (default): legacy vars or DATABASE_URL +DATABASE_BACKEND=mysql DATABASE_HOST=127.0.0.1 -# DATABASE_PORT=5432 -DATABASE_USER=postgres -DATABASE_PASSWORD=postgres +# DATABASE_PORT=3306 +DATABASE_USER=root +DATABASE_PASSWORD=rootpass DATABASE_SCHEMA=syncbot # Optional TLS (provider-dependent) # DATABASE_TLS_ENABLED=true # DATABASE_SSL_CA_PATH=/etc/pki/tls/certs/ca-bundle.crt -# Option B — MySQL (legacy): set backend and MySQL vars or DATABASE_URL -# DATABASE_BACKEND=mysql +# Option B — PostgreSQL: set backend and PostgreSQL vars or DATABASE_URL +# DATABASE_BACKEND=postgresql # DATABASE_HOST=127.0.0.1 -# DATABASE_PORT=3306 -# DATABASE_USER=root -# DATABASE_PASSWORD=rootpass +# DATABASE_PORT=5432 +# DATABASE_USER=postgres +# DATABASE_PASSWORD=postgres # DATABASE_SCHEMA=syncbot # Option C — SQLite (forks / local): set backend and URL only @@ -50,9 +50,12 @@ DATABASE_SCHEMA=syncbot # ----------------------------------------------------------------------------- # SLACK_BOT_TOKEN=xoxb-your-bot-token # SLACK_SIGNING_SECRET=your-signing-secret -# ENV_SLACK_CLIENT_ID=your-client-id -# 
ENV_SLACK_CLIENT_SECRET=your-client-secret -# ENV_SLACK_SCOPES=app_mentions:read,channels:history,channels:join,channels:manage,channels:read,chat:write,chat:write:user,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# SLACK_CLIENT_ID=your-client-id +# SLACK_CLIENT_SECRET=your-client-secret +# SLACK_BOT_SCOPES — bot OAuth scopes; must match slack-manifest.json oauth_config.scopes.bot (see syncbot/slack_manifest_scopes.py). +# SLACK_BOT_SCOPES=app_mentions:read,channels:history,channels:join,channels:read,channels:manage,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# SLACK_USER_SCOPES — user OAuth scopes; must match oauth_config.scopes.user and USER_SCOPES in slack_manifest_scopes.py. +# SLACK_USER_SCOPES=chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email # OAuth state and installation data are stored in the same database (PostgreSQL, MySQL, or SQLite). # ----------------------------------------------------------------------------- diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..163b8c1 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,40 @@ +# PR / branch checks without cloud credentials. Deploy workflows stay in deploy-*.yml. 
+name: CI + +on: + pull_request: + push: + branches: [main, master, test, prod] + +concurrency: + group: ci-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + sam-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + - name: sam validate --lint + run: | + sam validate -t infra/aws/template.yaml --lint + sam validate -t infra/aws/template.bootstrap.yaml --lint + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install poetry + poetry install --with dev + # Infra + deploy-script smoke tests (fast). Use `poetry run pytest` locally for the full suite. + - name: pytest (infra & deploy scripts) + run: poetry run pytest -q tests/test_deploy_script_syntax.py infra/aws/tests infra/gcp/tests diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 7e5f832..8eb970e 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -3,7 +3,7 @@ # # Token key policy: Non-local deploys require a secure TOKEN_ENCRYPTION_KEY. The AWS app stack # auto-generates it in Secrets Manager by default. Back up the generated key after first deploy. -# Optional DR secret TOKEN_ENCRYPTION_KEY_OVERRIDE passes TokenEncryptionKeyOverride for restore. +# Optional disaster recovery secret TOKEN_ENCRYPTION_KEY_OVERRIDE passes TokenEncryptionKeyOverride for restore. 
name: Deploy (AWS) @@ -30,6 +30,12 @@ jobs: - uses: aws-actions/setup-sam@v2 with: use-installer: true + + - name: Validate SAM templates (cfn-lint) + run: | + sam validate -t infra/aws/template.yaml --lint + sam validate -t infra/aws/template.bootstrap.yaml --lint + - uses: aws-actions/configure-aws-credentials@v4 with: role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} @@ -85,20 +91,24 @@ jobs: --stack-name ${{ vars.AWS_STACK_NAME }} \ --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ --capabilities CAPABILITY_IAM \ - --region us-east-2 \ + --region ${{ vars.AWS_REGION }} \ --no-disable-rollback \ --force-upload \ --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ + DatabaseEngine=${{ vars.DATABASE_ENGINE || 'mysql' }} \ ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ - DatabaseUser=${{ vars.DATABASE_USER }} \ + ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ + ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ + ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ - DatabasePassword=${{ secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" + $OVERRIDE_PARAM" sam-deploy-prod: if: github.ref == 'refs/heads/prod' @@ -136,17 +146,21 @@ jobs: --stack-name ${{ vars.AWS_STACK_NAME }} \ --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ --capabilities CAPABILITY_IAM \ - --region us-east-2 \ + --region ${{ vars.AWS_REGION }} \ --no-disable-rollback \ --force-upload \ --parameter-overrides \ "Stage=${{ vars.STAGE_NAME }} \ + DatabaseEngine=${{ vars.DATABASE_ENGINE || 'mysql' }} \ 
ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ - DatabaseUser=${{ vars.DATABASE_USER }} \ + ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ + ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ + ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ - DatabasePassword=${{ secrets.DATABASE_PASSWORD }}$OVERRIDE_PARAM" + $OVERRIDE_PARAM" diff --git a/.gitignore b/.gitignore index 85bebac..77c5db2 100644 --- a/.gitignore +++ b/.gitignore @@ -170,3 +170,7 @@ syncbot/.oauth-data/ # Cursor .cursor/ + +# Generated deploy artifacts +slack-manifest_*.json +deploy-receipts/ diff --git a/README.md b/README.md index 210bbfb..4746e56 100644 --- a/README.md +++ b/README.md @@ -1,310 +1,111 @@ # SyncBot SyncBot Icon -SyncBot is a Slack app originally developed for the [F3 Community](https://github.com/F3Nation-Community/syncbot) and has been forked here for general use by other Slack Workspace admins. It provides a replication ("Sync") service for messages and replies across Slack Workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs are automatically mirrored to every channel in a Sync group. +SyncBot is a Slack app for replicating messages and replies across workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs mirror to every channel in a Sync group. -> **New to SyncBot?** See the [User Guide](docs/USER_GUIDE.md) for a walkthrough of all features. 
+> **Using SyncBot in Slack?** See the [User Guide](docs/USER_GUIDE.md). --- -## Create a Slack App +## Deploy (AWS or GCP) -Before deploying (or developing locally) you need a Slack app: +From the **repository root**, use the infra-agnostic launcher: -1. Go to [api.slack.com/apps](https://api.slack.com/apps) and click **Create New App** → **From an app manifest** -2. Select your workspace, then paste the contents of [`slack-manifest.yaml`](slack-manifest.yaml) -3. After creating the app, upload the icon from [`assets/icon.png`](assets/icon.png) on the **Basic Information** page under **Display Information** -4. Note these values — you'll need them for deploy and/or local development: +| OS | Command | +|----|---------| +| macOS / Linux | `./deploy.sh` | +| Windows (PowerShell) | `.\deploy.ps1` | -| Where to find it | Value | Used for | -|-------------------|-------|----------| -| Basic Information → **App Credentials** | Signing Secret | Production deploy | -| Basic Information → **App Credentials** | Client ID, Client Secret | Production deploy (OAuth) | -| **OAuth & Permissions** → **Install to Workspace** → Install, then copy | Bot User OAuth Token (`xoxb-...`) | **Local development** | +The launcher lists providers under `infra//scripts/deploy.sh` (e.g. **aws**, **gcp**), prompts for a choice, and runs that script. Shortcuts: `./deploy.sh aws`, `./deploy.sh gcp`, `./deploy.sh 1`. On **Windows**, `deploy.ps1` checks for **Git Bash** or **WSL** bash, then runs the same `deploy.sh` paths (provider prerequisites are enforced inside those bash scripts). -5. After your first deploy, come back and replace the placeholder URLs in the app settings with your actual API Gateway endpoint (shown in the CloudFormation stack outputs) +### What to install first -> **Why do I need to install the app manually for local dev?** In production, SyncBot uses OAuth so each workspace gets its own token automatically. 
In local development mode, there's no OAuth flow — you connect to a single workspace using a bot token you copy from the Slack app settings. +| Tool | Why | +|------|-----| +| **Git** | Clone the repo; on Windows, **Git for Windows** supplies **Git Bash**, which the deploy scripts use. | +| **Bash** | Required for `./deploy.sh` and `infra/*/scripts/deploy.sh`. On Windows use Git Bash or **WSL** (then run `./deploy.sh` from Linux). | ---- - -## Deploying to AWS - -SyncBot ships with a full AWS SAM template (`infra/aws/template.yaml`) that provisions everything on the **free tier**: - -| Resource | Service | Free-Tier Detail | -|----------|---------|-----------------| -| Compute | Lambda (128 MB) | 1M requests/month free | -| API | API Gateway v1 | 1M calls/month free | -| Database | RDS PostgreSQL or MySQL (db.t3.micro) | Engine-specific; see AWS free tier | - -OAuth and app data are stored in RDS. Media is uploaded directly to Slack (no runtime S3). SAM deploy uses an S3 artifact bucket for packaging only. - -### Prerequisites - -| Tool | Version | Purpose | -|------|---------|---------| -| **AWS SAM CLI** | latest | Build & deploy Lambda + infra | -| **Docker** | latest | SAM uses a container to build the Lambda package | -| **psql client** *(optional)* | any | Ad-hoc RDS PostgreSQL checks | - -### First-Time Deploy - -1. **Build** the Lambda package: - -```bash -sam build --use-container -``` - -2. **Deploy** with guided prompts: - -```bash -sam deploy --guided -``` - -You'll be prompted for parameters like `DatabaseUser`, `DatabasePassword`, `SlackSigningSecret`, `SlackClientId`, `SlackClientSecret`, `EncryptionKey`, and `AllowedDBCidr`. These are stored as CloudFormation parameters (secrets use `NoEcho`). - -3. **Auto-initialize check** — on first startup, SyncBot creates the database schema and applies pending Alembic migrations automatically. - -4. 
**Update your Slack app URLs** to point at the API Gateway endpoint shown in the stack outputs (e.g., `https://xxxxx.execute-api.us-east-2.amazonaws.com/Prod/slack/events`). - -### Subsequent Deploys - -```bash -sam build --use-container -sam deploy # test (default profile) -sam deploy --config-env prod # production profile -``` - -The `samconfig.toml` file stores per-environment settings so you don't have to re-enter parameters. Each deploy automatically runs DB bootstrap/migrations during app startup. - -> For one-time bootstrap, **fork-and-deploy** (GitHub OIDC) and **download-and-deploy** (local limited credentials), see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md). - ---- - -## Local Development - -### Option A: Dev Container (recommended) - -Opens the project inside a Docker container with full editor integration — IntelliSense, debugging, terminal, and linting all run in the container. No local Python or MySQL install needed. - -**Prerequisites:** Docker Desktop + the [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension - -#### 1. Clone the repo and create a `.env` file - -```bash -git clone https://github.com/GITHUB_ORG_NAME/syncbot.git -cd syncbot -cp .env.example .env -``` - -Set `SLACK_BOT_TOKEN` to the `xoxb-...` token you copied from **OAuth & Permissions** after installing the app. - -#### 2. Open in Dev Container - -Open the project folder in your editor, then: - -- Press `Cmd+Shift+P` → **Dev Containers: Reopen in Container** -- Or click the green remote indicator in the bottom-left corner → **Reopen in Container** - -The first build takes a minute or two. After that, your editor is running inside the container with Python, MySQL, and all dependencies ready. - -Want SQLite instead of MySQL in the dev container? Set in `.env` before reopening: - -```bash -DATABASE_BACKEND=sqlite -DATABASE_URL=sqlite:////app/syncbot/syncbot.db -``` - -The app will use SQLite and ignore MySQL connection vars. 
- -#### 3. Run the app - -```bash -cd syncbot && python app.py -``` - -The app starts on **port 3000** (auto-forwarded to your host). - -#### 4. Expose to Slack - -In a **local** terminal (outside the container), start a tunnel: - -```bash -cloudflared tunnel --url http://localhost:3000/ -``` -or -```bash -ngrok http 3000 -``` - -Then update your Slack app's **Event Subscriptions** and **Interactivity** URLs to the public URL. - -#### 5. Run tests +**AWS** (`infra/aws/scripts/deploy.sh`): **AWS CLI v2**, **AWS SAM CLI**, **Docker** (for `sam build --use-container`), **Python 3** (`python3`), **`curl`** (Slack manifest API). **Optional:** **`gh`** (GitHub Actions setup); if `gh` is missing, the script shows install hints and asks whether to continue. -```bash -python -m pytest tests -v -``` - -#### 6. Connect to the database - -```bash -mysql -h db -u root -prootpass syncbot -``` +**GCP** (`infra/gcp/scripts/deploy.sh`): **Terraform**, **Google Cloud SDK (`gcloud`)**, **Python 3**, **`curl`**. **Optional:** **`gh`** — same behavior as AWS. -The database schema is initialized automatically on first run. To reset it, rebuild the container with **Dev Containers: Rebuild Container**. +Full behavior, manual `sam` / Terraform steps, GitHub variables, and troubleshooting: **[docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)**. --- -### Option B: Docker Compose (without Dev Container) - -Runs everything in containers but you edit files on your host. - -**Prerequisites:** Docker Desktop - -```bash -git clone https://github.com/GITHUB_ORG_NAME/syncbot.git -cd syncbot -cp .env.example .env # set SLACK_BOT_TOKEN -docker compose up --build -``` +## Slack app (before deploy or local dev) -The app listens on **port 3000**. Code changes require `docker compose restart app`. Only rebuild when `requirements.txt` changes. +1. [api.slack.com/apps](https://api.slack.com/apps) → **Create New App** → **From an app manifest** → paste [`slack-manifest.json`](slack-manifest.json). +2. 
Upload [`assets/icon.png`](assets/icon.png) under **Basic Information** → **Display Information**. +3. Copy **Signing Secret**, **Client ID**, and **Client Secret** (needed for deploy). For **local dev**, install the app under **OAuth & Permissions** and copy the **Bot User OAuth Token** (`xoxb-...`). -**SQLite mode (optional):** set this in `.env` before `docker compose up`: - -```bash -DATABASE_BACKEND=sqlite -DATABASE_URL=sqlite:////app/syncbot/syncbot.db -``` - -This stores the SQLite file inside the bind-mounted app folder. You can still leave the `db` service running; the app will ignore MySQL vars when `DATABASE_BACKEND=sqlite`. - -```bash -docker compose exec app python -m pytest /app/tests -v # run tests -docker compose exec db mysql -u root -prootpass syncbot # database shell -docker compose down -v # stop + delete DB volume -``` +After deployment, point Event Subscriptions and Interactivity at your real HTTPS URL (the deploy script can generate a stage-specific `slack-manifest_.json` and optional Slack API updates). Details: [DEPLOYMENT.md](docs/DEPLOYMENT.md). --- -### Option C: Native Python +## Local development -**Prerequisites:** Python 3.12+, Poetry 1.6+ (2.x recommended), Docker *(optional, for MySQL)* +### Dev Container (recommended) -```bash -git clone https://github.com/GITHUB_ORG_NAME/syncbot.git -cd syncbot -poetry install --with dev -``` +**Needs:** [Docker Desktop](https://www.docker.com/products/docker-desktop/) (or Docker Engine on Linux) + [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) in VS Code. -If you change dependencies in `pyproject.toml`, refresh lock and deployment pins: +1. `cp .env.example .env` and set `SLACK_BOT_TOKEN` (`xoxb-...`). +2. **Dev Containers: Reopen in Container** — Python, MySQL, and deps run inside the container. +3. `cd syncbot && python app.py` → app on **port 3000** (forwarded). +4. 
Expose to Slack with **cloudflared** or **ngrok** from the host; set Slack **Event Subscriptions** / **Interactivity** URLs to the public URL. -```bash -poetry lock -poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt -``` +Optional **SQLite**: in `.env` set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:////app/syncbot/syncbot.db`. -Start a local MySQL instance: +### Docker Compose (no Dev Container) ```bash -docker run -d --name syncbot-db \ - -e MYSQL_ROOT_PASSWORD=rootpass \ - -e MYSQL_DATABASE=syncbot \ - -p 3306:3306 \ - mysql:8 +cp .env.example .env # set SLACK_BOT_TOKEN +docker compose up --build ``` -Configure and run: +App on port **3000**; restart the `app` service after code changes. -```bash -cp .env.example .env # set SLACK_BOT_TOKEN + verify DATABASE_HOST=127.0.0.1 -source .env -poetry run python syncbot/app.py -``` +### Native Python -The app starts on **port 3000**. Use a tunnel to expose it to Slack. Run tests with `poetry run pytest -v`. +**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See `.env.example` and [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md). --- -## Environment Variables - -See [`.env.example`](.env.example) for all available options with descriptions. 
- -### Always Required - -| Variable | Description | -|----------|-------------| -| `DATABASE_HOST` | MySQL hostname | -| `DATABASE_USER` | MySQL username | -| `DATABASE_PASSWORD` | MySQL password | -| `DATABASE_SCHEMA` | MySQL database name | - -### Required in Production (Lambda) - -| Variable | Description | -|----------|-------------| -| `SLACK_SIGNING_SECRET` | Verifies incoming Slack requests | -| `ENV_SLACK_CLIENT_ID` | OAuth client ID | -| `ENV_SLACK_CLIENT_SECRET` | OAuth client secret | -| `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes | -| `TOKEN_ENCRYPTION_KEY` | Passphrase for Fernet bot-token encryption | - -OAuth state and installation data are stored in the same RDS MySQL database. - -### Local Development Only - -| Variable | Description | -|----------|-------------| -| `SLACK_BOT_TOKEN` | Bot token (`xoxb-...`) — presence triggers local-dev mode | -| `LOCAL_DEVELOPMENT` | Set to `true` to skip token verification and use readable logs | - -### Optional +## Configuration reference -| Variable | Default | Description | -|----------|---------|-------------| -| `REQUIRE_ADMIN` | `true` | Only admins/owners can configure syncs | -| `SOFT_DELETE_RETENTION_DAYS` | `30` | Days before soft-deleted data is purged | -| `SYNCBOT_FEDERATION_ENABLED` | `false` | Enable External Connections | -| `SYNCBOT_PUBLIC_URL` | *(none)* | Public URL for external connections | -| `ENABLE_DB_RESET` | `false` | Show a "Reset Database" button on the Home tab | +- **[`.env.example`](.env.example)** — local env vars with comments. +- **[docs/INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md)** — runtime contract for any cloud (DB, Slack, OAuth, production vs local). 
--- -## Further Reading +## Further reading -| Document | Description | -|----------|-------------| -| [User Guide](docs/USER_GUIDE.md) | End-user walkthrough of all features | -| [Architecture](docs/ARCHITECTURE.md) | Message sync flow, AWS infrastructure, caching | -| [Backup & Migration](docs/BACKUP_AND_MIGRATION.md) | Full-instance backup/restore, workspace data migration | -| [Deployment](docs/DEPLOYMENT.md) | Bootstrap, fork-and-deploy (GitHub OIDC), download-and-deploy | -| [API Reference](docs/API_REFERENCE.md) | HTTP endpoints and subscribed Slack events | -| [Improvements](docs/IMPROVEMENTS.md) | Completed and planned improvements | +| Doc | Contents | +|-----|----------| +| [USER_GUIDE.md](docs/USER_GUIDE.md) | End-user features (Home tab, syncs, groups) | +| [DEPLOYMENT.md](docs/DEPLOYMENT.md) | Guided + manual AWS/GCP deploy, CI, GitHub | +| [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md) | Environment variables and platform expectations | +| [ARCHITECTURE.md](docs/ARCHITECTURE.md) | Sync flow, AWS reference architecture | +| [BACKUP_AND_MIGRATION.md](docs/BACKUP_AND_MIGRATION.md) | Backup/restore and federation migration | +| [API_REFERENCE.md](docs/API_REFERENCE.md) | HTTP routes and Slack events | +| [IMPROVEMENTS.md](docs/IMPROVEMENTS.md) | Changelog / planned work | -## Project Structure +### Project layout ``` syncbot/ -├── syncbot/ # Application code (Lambda function) -│ ├── app.py # Entry point — Slack Bolt app + Lambda handler -│ ├── constants.py # Env-var names, startup validation -│ ├── routing.py # Event/action → handler dispatcher -│ ├── builders/ # Slack UI construction (Home tab, modals, forms) -│ ├── handlers/ # Slack event & action handlers -│ ├── helpers/ # Business logic, Slack API wrappers, utilities -│ ├── federation/ # Cross-instance sync (opt-in) -│ ├── db/ # Engine, session, ORM models -│ └── slack/ # Action IDs, forms, Block Kit helpers -├── db/alembic/ # Alembic migrations (backend-agnostic schema source) -├── db/alembic.ini # 
Alembic configuration -├── tests/ # pytest unit tests -├── docs/ # Extended documentation -├── infra/aws/ # AWS SAM templates (template.yaml, template.bootstrap.yaml) -├── slack-manifest.yaml # Slack app manifest -└── docker-compose.yml # Local development stack +├── syncbot/ # App (app.py); slack_manifest_scopes.py = bot/user OAuth scope lists (manifest + SLACK_BOT_SCOPES / SLACK_USER_SCOPES) +├── syncbot/db/alembic/ # Migrations (bundled with app for Lambda) +├── tests/ +├── docs/ +├── infra/aws/ # SAM, bootstrap stack +├── infra/gcp/ # Terraform +├── deploy.sh # Root launcher (macOS / Linux / Git Bash) +├── deploy.ps1 # Windows launcher → Git Bash or WSL → infra/.../deploy.sh +├── slack-manifest.json +└── docker-compose.yml ``` ## License -This project is licensed under **AGPL-3.0**, which means you can use and modify it, just keep it open and shareable. See [LICENSE](LICENSE) for details. +**AGPL-3.0** — see [LICENSE](LICENSE). diff --git a/db/alembic.ini b/alembic.ini similarity index 84% rename from db/alembic.ini rename to alembic.ini index b688396..b7e34df 100644 --- a/db/alembic.ini +++ b/alembic.ini @@ -1,8 +1,9 @@ -# Alembic config for SyncBot. Run from project root: alembic -c db/alembic.ini upgrade head +# Alembic config for SyncBot. Run from repo root: +# alembic -c alembic.ini upgrade head # The app runs migrations programmatically via db.initialize_database(). [alembic] -script_location = db/alembic +script_location = syncbot/db/alembic prepend_sys_path = . version_path_separator = os diff --git a/deploy.ps1 b/deploy.ps1 new file mode 100644 index 0000000..41f8bb2 --- /dev/null +++ b/deploy.ps1 @@ -0,0 +1,235 @@ +#requires -Version 5.1 +<# +.SYNOPSIS + SyncBot root deploy launcher for Windows (PowerShell). + + Verifies a bash environment (Git Bash or WSL), scans infra/*/scripts/deploy.sh, + then runs the selected script in bash — same contract as ./deploy.sh on macOS/Linux. 
+
+    Provider-specific prerequisite checks live in infra/<provider>/scripts/deploy.sh
+    (sourcing repo-root deploy.sh for shared helpers). There are no deploy.ps1 files under infra/.
+
+.EXAMPLE
+    .\deploy.ps1
+    .\deploy.ps1 aws
+    .\deploy.ps1 1
+#>
+param(
+    [Parameter(Position = 0)]
+    [string] $Selection = "",
+    [Parameter(ValueFromRemainingArguments = $true)]
+    [string[]] $ScriptArgs
+)
+
+$ErrorActionPreference = "Stop"
+
+function Find-GitBash {
+    $cmd = Get-Command bash -ErrorAction SilentlyContinue
+    if ($cmd) { return $cmd.Source }
+    $candidates = @(
+        "${env:ProgramFiles}\Git\bin\bash.exe",
+        "${env:ProgramFiles(x86)}\Git\bin\bash.exe",
+        "${env:LocalAppData}\Programs\Git\bin\bash.exe"
+    )
+    foreach ($p in $candidates) {
+        if (Test-Path -LiteralPath $p) { return $p }
+    }
+    return $null
+}
+
+function Test-WslBashWorks {
+    if (-not (Get-Command wsl.exe -ErrorAction SilentlyContinue)) { return $false }
+    try {
+        $null = & wsl.exe -e bash -c "echo wsl_ok" 2>&1
+        return ($LASTEXITCODE -eq 0)
+    } catch {
+        return $false
+    }
+}
+
+function Convert-WindowsPathToWsl {
+    param([string] $WindowsPath)
+    $full = (Resolve-Path -LiteralPath $WindowsPath).Path
+    if ($full -match '^([A-Za-z]):[\\/](.*)$') {
+        $drive = $Matches[1].ToLowerInvariant()
+        $tail = $Matches[2] -replace '\\', '/'
+        return "/mnt/$drive/$tail"
+    }
+    throw "Cannot map path to WSL (expected C:\...): $WindowsPath"
+}
+
+function Find-DeployBash {
+    $gitBash = Find-GitBash
+    if ($gitBash) {
+        return [pscustomobject]@{ Kind = 'GitBash'; Executable = $gitBash }
+    }
+    if (Test-WslBashWorks) {
+        return [pscustomobject]@{ Kind = 'Wsl'; Executable = 'wsl.exe' }
+    }
+    $bashCmd = Get-Command bash -ErrorAction SilentlyContinue
+    if ($bashCmd) {
+        return [pscustomobject]@{ Kind = 'Path'; Executable = $bashCmd.Source }
+    }
+    return $null
+}
+
+function Show-WindowsPrereqStatus {
+    param(
+        [Parameter(Mandatory = $true)]
+        [string] $RepoRoot,
+        [Parameter(Mandatory = $true)]
+        $BashInfo
+    )
+    Write-Host ""
+    Write-Host "=== 
SyncBot Deploy (Windows) ===" + Write-Host "Repository: $RepoRoot" + Write-Host "" + Write-Host "Bash environment:" + switch ($BashInfo.Kind) { + 'GitBash' { + Write-Host " Git Bash: $($BashInfo.Executable)" -ForegroundColor Green + if (Test-WslBashWorks) { + Write-Host " WSL: available (not used; Git Bash preferred)" -ForegroundColor DarkGray + } else { + Write-Host " WSL: not found or not ready" -ForegroundColor DarkGray + } + } + 'Wsl' { + Write-Host " Git Bash: not found" -ForegroundColor DarkGray + Write-Host " WSL: bash (will run deploy.sh with Windows paths mapped to /mnt/...)" -ForegroundColor Green + } + 'Path' { + Write-Host " bash: $($BashInfo.Executable)" -ForegroundColor Green + } + } + Write-Host "" +} + +function Invoke-DeploySh { + param( + [Parameter(Mandatory = $true)] + $BashInfo, + [Parameter(Mandatory = $true)] + [string] $ScriptPath, + [string[]] $BashArgs + ) + $extra = if ($null -ne $BashArgs -and $BashArgs.Count -gt 0) { @($BashArgs) } else { @() } + if ($BashInfo.Kind -eq 'Wsl') { + $wslPath = Convert-WindowsPathToWsl -WindowsPath $ScriptPath + & wsl.exe -e bash $wslPath @extra + } else { + & $BashInfo.Executable $ScriptPath @extra + } +} + +function Show-Usage { + @" +Usage: .\deploy.ps1 [selection] [provider-script-args...] + +No args: + Scan infra/*/scripts/deploy.sh, show a numbered menu, and run your choice. + +With [selection]: + - provider name (e.g. aws, gcp), OR + - menu index (e.g. 
1, 2) +"@ +} + +function Get-DeployScripts { + param([string] $RepoRoot) + $infraDir = Join-Path $RepoRoot "infra" + if (-not (Test-Path -LiteralPath $infraDir)) { return @() } + + $providers = Get-ChildItem -LiteralPath $infraDir -Directory -ErrorAction SilentlyContinue | Sort-Object Name + $results = @() + foreach ($provider in $providers) { + $scriptPath = Join-Path $provider.FullName "scripts/deploy.sh" + if (Test-Path -LiteralPath $scriptPath) { + $results += [pscustomobject]@{ + Provider = $provider.Name + Path = $scriptPath + } + } + } + return $results +} + +function Resolve-Selection { + param( + [array] $Entries, + [string] $Selection + ) + + if ($Selection -match '^\d+$') { + $index = [int]$Selection + if ($index -ge 1 -and $index -le $Entries.Count) { + return $Entries[$index - 1] + } + return $null + } + + foreach ($entry in $Entries) { + if ($entry.Provider -ieq $Selection) { + return $entry + } + } + return $null +} + +$RepoRoot = Split-Path -Parent $MyInvocation.MyCommand.Path + +if ($Selection -in @("-h", "--help", "help")) { + Show-Usage + exit 0 +} + +$bashInfo = Find-DeployBash +if (-not $bashInfo) { + Write-Host @" +Error: no bash found. 
Install one of:
+
+  • Git for Windows (Git Bash): https://git-scm.com/download/win
+  • WSL (Windows Subsystem for Linux): https://learn.microsoft.com/windows/wsl/install
+
+Then re-run: .\deploy.ps1
+"@ -ForegroundColor Red
+    exit 1
+}
+
+Show-WindowsPrereqStatus -RepoRoot $RepoRoot -BashInfo $bashInfo
+
+$entries = Get-DeployScripts -RepoRoot $RepoRoot
+if ($entries.Count -eq 0) {
+    Write-Error "No deploy scripts found under infra/*/scripts/deploy.sh"
+    exit 1
+}
+
+if ([string]::IsNullOrWhiteSpace($Selection)) {
+    Write-Host "Discovered deploy scripts:"
+    for ($i = 0; $i -lt $entries.Count; $i++) {
+        $n = $i + 1
+        $relativePath = $entries[$i].Path
+        if ($relativePath.StartsWith($RepoRoot, [System.StringComparison]::OrdinalIgnoreCase)) {
+            $relativePath = $relativePath.Substring($RepoRoot.Length).TrimStart('\', '/')
+        }
+        Write-Host "  $n) $($entries[$i].Provider) ($relativePath)"
+    }
+    Write-Host "  0) Exit"
+    Write-Host ""
+    $choice = Read-Host "Choose provider [1]"
+    if ([string]::IsNullOrWhiteSpace($choice)) { $choice = "1" }
+    if ($choice -eq "0") { exit 0 }
+    $Selection = $choice
+}
+
+$selected = Resolve-Selection -Entries $entries -Selection $Selection
+if (-not $selected) {
+    Write-Host "Invalid selection: $Selection" -ForegroundColor Red
+    Write-Host ""
+    Show-Usage
+    exit 1
+}
+
+Write-Host "Running: $($selected.Path)"
+Invoke-DeploySh -BashInfo $bashInfo -ScriptPath $selected.Path -BashArgs $ScriptArgs
+exit $LASTEXITCODE
diff --git a/deploy.sh b/deploy.sh
new file mode 100755
index 0000000..664a012
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,361 @@
+#!/usr/bin/env bash
+# SyncBot infra-agnostic deploy launcher.
+# Discovers provider scripts at infra/<provider>/scripts/deploy.sh and runs one. 
+# +# Phases when executed as ./deploy.sh (not when sourced): +# 1) Discover infra/*/scripts/deploy.sh +# 2) Interactive menu or CLI selection (provider name or index) +# 3) Resolve script path and exec the provider deploy script with bash +# +# Prerequisite helpers below are also sourced by infra/*/scripts/deploy.sh: +# source "$REPO_ROOT/deploy.sh" +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$SCRIPT_DIR" + +# --------------------------------------------------------------------------- +# Prerequisite helpers (shared with infra/aws and infra/gcp deploy scripts). +# macOS: Homebrew one-liners where common. Otherwise: vendor install documentation +# (Darwin / Linux / other uname from uname -s only — no platform-specific logic beyond that). +# Root: ./deploy.sh; alternate entrypoint: deploy.ps1 in repo root (see README). +# --------------------------------------------------------------------------- + +prereqs_hint_aws_cli() { + echo "Install AWS CLI v2:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install awscli" ;; + Linux) echo " https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" ;; + *) echo " https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" ;; + esac + echo " User guide: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html" +} + +prereqs_hint_sam_cli() { + echo "Install AWS SAM CLI:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install aws-sam-cli" ;; + *) + echo " https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html" + ;; + esac + echo " Developer guide: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html" +} + +prereqs_hint_terraform() { + echo "Install Terraform:" + echo " https://developer.hashicorp.com/terraform/install" + echo " Introduction: https://developer.hashicorp.com/terraform/docs" +} + +prereqs_hint_gcloud() { 
+ echo "Install Google Cloud SDK:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install --cask google-cloud-sdk" ;; + *) echo " https://cloud.google.com/sdk/docs/install" ;; + esac + echo " gcloud CLI reference: https://cloud.google.com/sdk/gcloud/reference" +} + +prereqs_hint_gh_cli() { + echo "Install GitHub CLI (gh):" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install gh" ;; + Linux) echo " https://github.com/cli/cli/blob/trunk/docs/install_linux.md" ;; + *) echo " https://cli.github.com/" ;; + esac + echo " Manual: https://cli.github.com/manual/" +} + +prereqs_hint_python3() { + echo "Install Python 3.12+ (the deploy helpers use python3 for manifest/JSON helpers):" + echo " https://www.python.org/downloads/" + echo " Documentation: https://docs.python.org/3/" +} + +prereqs_hint_docker() { + echo "Install Docker (used by sam build --use-container on AWS):" + case "$(uname -s 2>/dev/null)" in + Linux) echo " https://docs.docker.com/engine/install/" ;; + *) echo " https://www.docker.com/products/docker-desktop/" ;; + esac +} + +prereqs_hint_curl() { + echo "Install curl (used for Slack manifest API and downloads):" + echo " https://curl.se/download.html" +} + +prereqs_hint_slack_apps_docs() { + echo "Slack apps (browser) and API tokens (optional manifest automation):" + echo " https://api.slack.com/apps" + echo " https://api.slack.com/authentication/token-types" + echo "Manifest API (apps.manifest.update / create):" + echo " https://api.slack.com/reference/methods/apps.manifest.update" +} + +prereqs_icon_ok() { + printf '\033[0;32m✓\033[0m' +} + +prereqs_icon_optional() { + printf '\033[1;33m!\033[0m' +} + +prereqs_icon_required_missing() { + printf '\033[0;31m✗\033[0m' +} + +prereqs_prompt_continue_without_optional() { + local answer + read -r -p "Do you want to proceed? 
[Y/n]: " answer + if [[ -z "$answer" || "$answer" =~ ^[Yy]$ ]]; then + return 0 + fi + return 1 +} + +prereqs_print_cli_status_matrix() { + local provider="$1" + shift + local name + echo "" >&2 + echo "=== CLI Prerequisites ($provider) ===" >&2 + for name in "$@"; do + if command -v "$name" >/dev/null 2>&1; then + printf ' %s: %s\n' "$name" "$(prereqs_icon_ok)" >&2 + else + printf ' %s: %s\n' "$name" "$(prereqs_icon_required_missing)" >&2 + fi + done + if command -v gh >/dev/null 2>&1; then + printf ' gh: %s\n' "$(prereqs_icon_ok)" >&2 + else + printf ' gh: %s\n' "$(prereqs_icon_optional)" >&2 + echo "" >&2 + echo "The GitHub gh command was not found; install it for automated GitHub repository setup." >&2 + prereqs_hint_gh_cli >&2 + echo "" >&2 + if ! prereqs_prompt_continue_without_optional; then + echo "Exiting. Install gh and rerun, or answer Y to continue without it." >&2 + exit 1 + fi + fi + echo "" >&2 + prereqs_hint_slack_apps_docs >&2 + echo "" >&2 +} + +prereqs_require_cmd() { + local cmd="$1" + local hint_fn="${2:-}" + if ! command -v "$cmd" >/dev/null 2>&1; then + echo "Error: required command '$cmd' not found in PATH." >&2 + if [[ -n "$hint_fn" ]] && declare -F "$hint_fn" >/dev/null 2>&1; then + "$hint_fn" >&2 + fi + exit 1 + fi +} + +# Slack Web API responses can be large; avoid flooding the terminal on errors (AWS/GCP deploy scripts). +slack_api_echo_truncated_body() { + local body="$1" + local max_len="${2:-500}" + if [[ -z "$body" ]]; then + echo "(empty response)" + return 0 + fi + if [[ ${#body} -gt max_len ]]; then + echo "${body:0:max_len}... (truncated, ${#body} chars total)" + else + echo "$body" + fi +} + +# Log level (shared by infra/aws and infra/gcp deploy scripts; matches syncbot/logger.py LOG_LEVEL). 
+is_valid_log_level() { + case "$1" in + DEBUG | INFO | WARNING | ERROR | CRITICAL) return 0 ;; + *) return 1 ;; + esac +} + +normalize_log_level() { + echo "$1" | tr "[:lower:]" "[:upper:]" +} + +# Menu order: DEBUG first (1), then INFO..CRITICAL. Matches Python logging severity order. +log_level_to_menu_index() { + case "$(normalize_log_level "$1")" in + DEBUG) echo 1 ;; + INFO) echo 2 ;; + WARNING) echo 3 ;; + ERROR) echo 4 ;; + CRITICAL) echo 5 ;; + *) echo 2 ;; + esac +} + +menu_index_to_log_level() { + case "$1" in + 1) echo DEBUG ;; + 2) echo INFO ;; + 3) echo WARNING ;; + 4) echo ERROR ;; + 5) echo CRITICAL ;; + *) return 1 ;; + esac +} + +prompt_log_level() { + local default_level="$1" + local default_idx choice i name suf + default_idx="$(log_level_to_menu_index "$default_level")" + + echo >&2 + for i in 1 2 3 4 5; do + name="$(menu_index_to_log_level "$i")" + suf="" + [[ "$i" == "$default_idx" ]] && suf=" (default)" + echo " $i) $name$suf" >&2 + done + + while true; do + read -r -p "Choose level [$default_idx]: " choice + [[ -z "$choice" ]] && choice="$default_idx" + case "$choice" in + 1 | 2 | 3 | 4 | 5) + menu_index_to_log_level "$choice" + return 0 + ;; + esac + echo "Invalid choice: $choice. Enter a number from 1 to 5." >&2 + done +} + +# When sourced by infra/*/scripts/deploy.sh, only load helpers above. 
+if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
+    return 0
+fi
+
+# ---------------------------------------------------------------------------
+# Launcher (only when this file is executed: ./deploy.sh)
+# ---------------------------------------------------------------------------
+
+usage() {
+    cat <<'EOF' >&2
+Usage: ./deploy.sh [selection] [provider-script-args...]
+
+No args:
+  Scan infra/*/scripts/deploy.sh, show a numbered menu, and run your choice.
+
+With [selection]:
+  - provider name (e.g. aws, gcp), OR
+  - menu index (e.g. 1, 2)
+EOF
+}
+
+# NOTE(review): the following two functions were reconstructed from the calls in
+# main() and the tab-separated entry format consumed by resolve_script_from_selection;
+# the original text was lost to extraction garbling — confirm against the real patch.
+# Emit one "provider<TAB>path" line per infra/*/scripts/deploy.sh found.
+discover_deploy_scripts() {
+    local provider_dir script_path
+    for provider_dir in "$REPO_ROOT"/infra/*/; do
+        [[ -d "$provider_dir" ]] || continue
+        script_path="${provider_dir}scripts/deploy.sh"
+        [[ -f "$script_path" ]] || continue
+        printf '%s\t%s\n' "$(basename "$provider_dir")" "$script_path"
+    done
+}
+
+select_script_interactive() {
+    local entries="$1"
+    local idx=1 rel_path provider path
+
+    echo "=== SyncBot Deploy ===" >&2
+    echo "Repository: $REPO_ROOT" >&2
+    echo >&2
+    echo "Discovered deploy scripts:" >&2
+
+    while IFS=$'\t' read -r provider path; do
+        [[ -z "$provider" ]] && continue
+        rel_path="${path#$REPO_ROOT/}"
+        echo "  $idx) $provider ($rel_path)" >&2
+        idx=$((idx + 1))
+    done <<< "$entries"
+    echo "  0) Exit" >&2
+    echo >&2
+
+    local choice
+    read -r -p "Choose provider [1]: " choice >&2
+    choice="${choice:-1}"
+    echo "$choice"
+}
+
+resolve_script_from_selection() {
+    local entries="$1"
+    local selection="$2"
+    local line idx=1 provider path
+
+    # Numeric selection
+    if [[ "$selection" =~ ^[0-9]+$ ]]; then
+        while IFS=$'\t' read -r provider path; do
+            [[ -z "$provider" ]] && continue
+            if [[ "$idx" -eq "$selection" ]]; then
+                echo "$path"
+                return 0
+            fi
+            idx=$((idx + 1))
+        done <<< "$entries"
+        return 1
+    fi
+
+    # Provider name selection
+    while IFS=$'\t' read -r provider path; do
+        [[ -z "$provider" ]] && continue
+        if [[ "$provider" == "$selection" ]]; then
+            echo "$path"
+            return 0
+        fi
+    done <<< "$entries"
+    return 1
+}
+
+main() {
+    if [[ "${1:-}" == "-h" || "${1:-}" == "--help" || "${1:-}" == "help" ]]; then
+        usage
+        exit 0
+    fi
+
+    local entries
+    entries="$(discover_deploy_scripts)"
+    if [[ -z "$entries" ]]; then
+        echo "No deploy scripts found under infra/*/scripts/deploy.sh" >&2
+        exit 1
+    fi
+
+    local selection="${1:-}"
+    if [[ -z "$selection" ]]; then
+        selection="$(select_script_interactive "$entries")"
+    fi
+    if [[ "$selection" == "0" ]]; then
+        exit 0
+    fi
+
+    local script_path
+    if ! 
script_path="$(resolve_script_from_selection "$entries" "$selection")"; then + echo "Invalid selection: $selection" >&2 + echo + usage + exit 1 + fi + + echo "=== Run Provider Script ===" + echo "Running: $script_path" + bash "$script_path" +} + +main "$@" diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 94a6a74..80e3c6e 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -61,6 +61,8 @@ The same pattern applies to edits (`chat.update`), deletes (`chat.delete`), thre ## AWS Infrastructure +How to deploy or update this stack (guided script, `sam`, GitHub Actions) is documented in **[DEPLOYMENT.md](DEPLOYMENT.md)**. The diagram below reflects the **reference** SAM template (`infra/aws/template.yaml`). + ```mermaid flowchart TB subgraph Slack["Slack Platform"] diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 74f0606..9549c0e 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -1,332 +1,272 @@ # Deployment Guide -This guide covers deploying SyncBot on **AWS** (default) or **GCP**, with two paths per provider: +This guide explains **what the guided deploy scripts do**, how to perform the **same steps manually** on **AWS** or **GCP**, and how **GitHub Actions** fits in. For the runtime environment variables the app expects in any cloud, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). -- **Fork and deploy** — One-time bootstrap, then all deploys via GitHub Actions (OIDC on AWS, Workload Identity Federation on GCP; no long-lived keys). -- **Download and deploy** — One-time bootstrap, then updates via local CLI (`sam` on AWS, `gcloud`/Terraform on GCP) using limited credentials. - -The app code and [infrastructure contract](INFRA_CONTRACT.md) are provider-agnostic; only the infrastructure in `infra//` and the CI workflow differ. - -**Runtime baseline:** Python 3.12. Keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda runtimes, and CI Python version aligned. 
+**Runtime baseline:** Python 3.12 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. --- -## Fork-First Model (Recommended) +## Quick start: root launcher -If your goal is "fork and deploy on a different cloud, while still PR'ing app improvements back to SyncBot", use this model: +From the **repository root**: -1. Keep `syncbot/` provider-neutral and depend only on env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md). -2. Put provider implementation in `infra//` and `.github/workflows/deploy-.yml`. -3. Keep AWS path as the reference implementation; treat other providers as swappable scaffolds. -4. Send upstream PRs for provider-neutral changes (DB abstraction, docs contract, tests) and keep fork-only deploy glue isolated. +| OS | Command | +|----|---------| +| macOS / Linux | `./deploy.sh` | +| Windows (PowerShell) | `.\deploy.ps1` | -This is the intended maintenance path for long-lived forks. +The launcher discovers `infra//scripts/deploy.sh`, shows a numbered menu, and runs the script you pick. ---- +**Non-interactive:** `./deploy.sh aws`, `./deploy.sh gcp`, `./deploy.sh 1` (same for `deploy.ps1`). -## Provider selection +**Windows:** `deploy.ps1` requires **Git Bash** or **WSL** with bash, then runs the same `infra/.../deploy.sh` as macOS/Linux. Alternatively install [Git for Windows](https://git-scm.com/download/win) or [WSL](https://learn.microsoft.com/windows/wsl/install) and run `./deploy.sh` from Git Bash or a WSL shell. -| Provider | Infra folder | CI workflow | Default | -|----------|--------------|-------------|---------| -| **AWS** | `infra/aws/` | `.github/workflows/deploy-aws.yml` | Yes | -| **GCP** | `infra/gcp/` | `.github/workflows/deploy-gcp.yml` | No (opt-in) | +**Prerequisites** (also summarized in the root [README](../README.md)): -- **Use AWS:** Do nothing; the AWS workflow runs on push to `test`/`prod` unless you set `DEPLOY_TARGET=gcp`. 
-- **Use GCP:** Run `infra/gcp/` Terraform, configure Workload Identity Federation, set repository variable **`DEPLOY_TARGET`** = **`gcp`**, and disable or remove the AWS workflow so only `deploy-gcp.yml` runs. +- **AWS path:** AWS CLI v2, SAM CLI, Docker (`sam build --use-container`), Python 3 (`python3`), **`curl`** (Slack manifest API). **Optional:** `gh` (GitHub Actions setup). The script prints a CLI status line per tool (✓ / !) and Slack doc links; if `gh` is missing, it asks whether to continue. +- **GCP path:** Terraform, `gcloud`, Python 3, **`curl`**. **Optional:** `gh` — same behavior as AWS. -See [Swapping providers](#swapping-providers) for changing providers in a fork. +**Slack install error `invalid_scope` / “Invalid permissions requested”:** The OAuth authorize URL is built from **`SLACK_BOT_SCOPES`** and **`SLACK_USER_SCOPES`** in your deployed app (Lambda / Cloud Run). They must **exactly match** the scopes on your Slack app (`slack-manifest.json` → **OAuth & Permissions** after manifest update) and `BOT_SCOPES` / `USER_SCOPES` in `syncbot/slack_manifest_scopes.py`. SAM and GCP Terraform defaults include both bot and user scope strings; if your environment has **stale** overrides, redeploy with parameters matching the manifest or update the Slack app to match. On GCP, `slack_user_scopes` must stay aligned with `oauth_config.scopes.user`. **Renames (older stacks):** `SLACK_SCOPES` → `SLACK_BOT_SCOPES`; SAM `SlackOauthScopes` → `SlackOauthBotScopes`; SAM `SlackUserOauthScopes` → `SlackOauthUserScopes` (`SLACK_USER_SCOPES` unchanged). --- -## Database backend +## What the deploy scripts do -The app supports **PostgreSQL** (default, including Aurora DSQL and RDS PostgreSQL), **MySQL** (legacy), and **SQLite**. See [INFRA_CONTRACT.md](INFRA_CONTRACT.md) for required variables per backend. **Pre-release:** DB flow assumes **fresh installs only**; schema is created at startup via Alembic. 
+### Root: `deploy.sh` / `deploy.ps1` -- **PostgreSQL / Aurora DSQL (default):** Set `DATABASE_BACKEND=postgresql` (or rely on the app default) and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. The AWS SAM template parameter **`DatabaseEngine`** defaults to **`postgresql`** (new RDS PostgreSQL in stack, or existing host with the custom-resource setup). -- **MySQL:** Set `DATABASE_BACKEND=mysql` and either `DATABASE_URL` or the four host/user/password/schema vars. On AWS, choose **Advanced: legacy MySQL** in `./infra/aws/scripts/deploy.sh` or pass `DatabaseEngine=mysql` to `sam deploy`. -- **SQLite:** Use for forks or local runs where you prefer no DB server. Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/syncbot.db`. Single-writer; ensure backups and file durability. For SQLite on Lambda you need durable shared storage (e.g. EFS); the reference SAM template targets PostgreSQL/MySQL. +- Scans `infra/*/scripts/deploy.sh` and lists providers (e.g. **aws**, **gcp**). +- Runs the selected provider script in Bash. +- **`./deploy.sh` (macOS / Linux):** Invokes `bash` with the chosen `infra//scripts/deploy.sh`. +- **`.\deploy.ps1` (Windows):** Verifies **Git Bash** or **WSL** bash is available (shows which one will be used), then runs the same `deploy.sh` path. There are **no** `deploy.ps1` files under `infra/` — only the repo-root launcher uses PowerShell. Provider prerequisite checks (AWS/GCP tools, optional `gh`, Slack links) run **inside** the bash `deploy.sh` scripts. ---- +### AWS: `infra/aws/scripts/deploy.sh` -## AWS +Runs from repo root (or via `./deploy.sh` → **aws**). It: -### One-Time Bootstrap (AWS, both paths) +1. **Prerequisites** — Verifies `aws`, `sam`, `docker`, `python3`, `curl` are on `PATH` (with install hints). Prints a status matrix; if optional `gh` is missing, shows install hints and asks whether to continue. 
Prints Slack app / API token / manifest API links. +2. **AWS auth** — Checks credentials; suggests `aws login`, SSO, or `aws configure` as appropriate. +3. **Bootstrap** — Reads or deploys the CloudFormation bootstrap stack (`infra/aws/template.bootstrap.yaml`): OIDC deploy role, S3 artifact bucket, etc. +4. **App stack** — Prompts for stage (`test`/`prod`) and stack name; **database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). Then **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. After that: optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), and a **deploy summary** before you proceed to the build. +5. **Deploy artifacts** — `sam build -t infra/aws/template.yaml --use-container` then `sam deploy` with assembled parameters (including optional token/app-secret overrides for recovery). +6. **Post-deploy** — Prints stack outputs, can generate `slack-manifest_.json`, optional Slack API configure, **backup summary** of secrets, optional **`gh`** setup for GitHub environments/variables/secrets, and a local **deploy receipt** under `deploy-receipts/` (gitignored). -Deploy the bootstrap stack **once** from your machine with credentials that can create IAM roles, OIDC providers, and S3 buckets. +You can **skip infra** on an existing stack and jump to GitHub-only setup when prompted. -**Prerequisites:** AWS CLI, [SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html). For fork-and-deploy: a GitHub repo `owner/repo`. 
+### GCP: `infra/gcp/scripts/deploy.sh`
 
-From the project root:
+Runs from repo root (or `./deploy.sh` → **gcp**). It:
 
-```bash
-aws cloudformation deploy \
-  --template-file infra/aws/template.bootstrap.yaml \
-  --stack-name syncbot-bootstrap \
-  --parameter-overrides \
-    GitHubRepository=YOUR_GITHUB_OWNER/YOUR_REPO \
-  --capabilities CAPABILITY_NAMED_IAM \
-  --region us-east-2
-```
+1. Verifies **Terraform**, **gcloud**, **python3**, **curl**; optional **gh** handling (same as AWS).
+2. Guides **auth** (`gcloud auth application-default login` / quota project as needed).
+3. **Terraform** — `init` / `plan` / `apply` in `infra/gcp` with prompts for project, stage, image, DB mode, Slack secrets, etc.; can detect existing Cloud Run / Cloud SQL for defaults.
+4. **Post-deploy** — Manifest generation, optional Slack API, deploy receipt, optional **`gh`** for GitHub.
 
-Replace `YOUR_GITHUB_OWNER/YOUR_REPO` with your repo. Optionally set `CreateOIDCProvider=false` if the account already has the GitHub OIDC provider. The bootstrap template only accepts `GitHubRepository`, `CreateOIDCProvider`, and `DeploymentBucketPrefix` (database options go in the main app deploy, not bootstrap).
+See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and outputs.
 
-**Capture outputs:**
+---
 
-```bash
-./infra/aws/scripts/print-bootstrap-outputs.sh
-```
+## Fork-First model (recommended for forks)
 
-You need: **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketName** → `AWS_S3_BUCKET`, **BootstrapRegion** → `AWS_REGION`, and suggested stack names for test/prod.
+1. Keep `syncbot/` provider-neutral; use only env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md).
+2. Put provider code in `infra/<provider>/` and `.github/workflows/deploy-<provider>.yml`.
+3. Prefer the AWS layout as reference; treat other providers as swappable scaffolds.
--- -### Fast path: interactive AWS deploy script +## Provider selection (CI) -For local, end-to-end deploys (bootstrap + build + deploy), use: +| Provider | Infra | CI workflow | Default | +|----------|-------|-------------|---------| +| **AWS** | `infra/aws/` | `.github/workflows/deploy-aws.yml` | Yes | +| **GCP** | `infra/gcp/` | `.github/workflows/deploy-gcp.yml` | Opt-in | -```bash -./infra/aws/scripts/deploy.sh -``` +- **AWS only:** Do not set `DEPLOY_TARGET=gcp` (or set it to something other than `gcp`). +- **GCP only:** Set repository variable **`DEPLOY_TARGET`** = **`gcp`**, complete GCP bootstrap + WIF, and disable or skip the AWS workflow so only `deploy-gcp.yml` runs. -The script: -- prompts for stage (`test`/`prod`) and DB mode (new RDS vs existing host), -- defaults to **PostgreSQL** (`DatabaseEngine=postgresql`); optional advanced prompt for **legacy MySQL** (`DatabaseEngine=mysql`), -- prompts for required secrets/credentials, -- auto-detects bootstrap outputs (region, deploy bucket, suggested stack names) when available, -- supports existing-RDS `public` or `private` network mode (with VPC subnet/security-group prompts for private mode), -- supports disaster recovery with `TokenEncryptionKeyOverride`, -- runs `sam build` and `sam deploy` for you. +--- -If bootstrap is missing, it can deploy bootstrap first. +## Database backends ---- +The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. **Pre-release:** DB flow targets **fresh installs**; schema is applied at startup via Alembic. -### Fork and Deploy (AWS, GitHub CI) +- **AWS:** Choose engine in the deploy script or pass `DatabaseEngine=mysql` / `postgresql` to `sam deploy`. +- **Contract:** [INFRA_CONTRACT.md](INFRA_CONTRACT.md) — `DATABASE_BACKEND`, `DATABASE_URL` or host/user/password/schema. -1. Complete [One-Time Bootstrap (AWS)](#one-time-bootstrap-aws-both-paths). -2. 
**First app deploy** (with credentials that can create RDS/VPC/Lambda/API Gateway): +--- - ```bash - sam build -t infra/aws/template.yaml --use-container - sam deploy --guided \ - --template-file infra/aws/template.yaml \ - --stack-name syncbot-test \ - --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ - --capabilities CAPABILITY_IAM \ - --region us-east-2 - ``` +## AWS — manual steps (no helper script) - Use the bootstrap **DeploymentBucketName**. Set parameters (Stage, DB, Slack, etc.) when prompted. +Use this when you already know SAM/CloudFormation or are debugging. -3. **GitHub:** Create environments `test` and `prod`. In **Settings → Secrets and variables → Actions**, set **Variables** (per env): `AWS_ROLE_TO_ASSUME`, `AWS_REGION`, `AWS_S3_BUCKET`, `AWS_STACK_NAME`, `STAGE_NAME`, `SLACK_CLIENT_ID` (Slack app Client ID from Basic Information → App Credentials), `EXISTING_DATABASE_HOST`, `EXISTING_DATABASE_ADMIN_USER` (when using existing RDS host), `DATABASE_USER` (when creating new RDS), `DATABASE_SCHEMA`. Set **Secrets**: `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET`, `EXISTING_DATABASE_ADMIN_PASSWORD` (when using existing host), `DATABASE_PASSWORD` (when creating new RDS). No access keys — the workflow uses OIDC. The SAM template defaults **`DatabaseEngine=postgresql`** (Aurora DSQL / RDS PostgreSQL). To deploy **legacy MySQL** from CI, extend the workflow `parameter-overrides` to include `DatabaseEngine=mysql` (or add a repository variable and wire it through). -4. Push to `test` or `prod` to build and deploy. The workflow file is `.github/workflows/deploy-aws.yml` (runs when `DEPLOY_TARGET` is not `gcp`). - - The AWS workflow runs `pip-audit` against `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`, so dependency pins should be kept current. +### 1. One-time bootstrap -When dependency constraints change in `pyproject.toml`, refresh both lock and deployment requirements: +**Prerequisites:** AWS CLI, SAM CLI (for later app deploy). 
```bash -poetry lock -poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +aws cloudformation deploy \ + --template-file infra/aws/template.bootstrap.yaml \ + --stack-name syncbot-bootstrap \ + --parameter-overrides \ + GitHubRepository=YOUR_GITHUB_OWNER/YOUR_REPO \ + --capabilities CAPABILITY_NAMED_IAM \ + --region us-east-2 ``` -**Important (token encryption key):** Non-local deploys require a secure `TOKEN_ENCRYPTION_KEY`. The AWS app stack **auto-generates** it in Secrets Manager by default. You must **back up the generated key** after first deploy; if it is lost, existing workspaces must reinstall to re-authorize bot tokens. For local development you may set the key manually in `.env` or leave it unset. - -#### Using an existing RDS host (AWS) - -To **reuse only the DB host** and have the deploy create the schema and a dedicated app user (and generated password) for you: - -1. **What the stack does:** - When you set **ExistingDatabaseHost**, the template skips creating VPC, subnets, and RDS. A custom resource runs during deploy: it connects to your existing MySQL with a **bootstrap** (master) user you provide, creates the schema, creates an app user `syncbot_` with a **generated** password (stored in Secrets Manager), and grants that user full access to the schema. The app Lambda then uses that app user and generated password. You never manage the app DB password. - -2. **What you provide:** - - **Host:** The RDS endpoint (e.g. `mydb.xxxx.us-east-2.rds.amazonaws.com`). No `:3306` or path. - - **Admin user & password:** A MySQL user that can create databases and users (e.g. RDS master). Used only by the deploy step; the app uses a separate `syncbot_` user. - - **Schema name:** A dedicated schema per app or environment (e.g. `syncbot_test`, `syncbot_prod`). The deploy creates this schema and the app user with full access to it; the app runs Alembic migrations on startup. - -3. 
**Connectivity:** - Existing-host deploys support two modes: - - **public** (default): Lambda is not VPC-attached; existing RDS must be publicly reachable on 3306. - - **private**: Lambda is VPC-attached using `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`. - - For private mode, ensure: - - the Lambda security group can reach the DB on TCP 3306, and - - the app Lambda has outbound internet egress (typically NAT) so Slack API calls succeed. - -4. **First deploy (local `sam deploy`):** - Pass the **existing-host** parameters (admin user/password). When using **guided** mode, SAM may still prompt for **DatabaseUser** and **DatabasePassword**; the stack ignores these when using an existing host (app user/password are auto-generated). If a prompt repeats, provide any placeholder and continue. For non-guided deploys, pass only the existing-host parameters you actually use: - ```bash - sam deploy --guided ... \ - --parameter-overrides \ - ExistingDatabaseHost=your-db.xxxx.us-east-2.rds.amazonaws.com \ - ExistingDatabaseAdminUser=admin \ - ExistingDatabaseAdminPassword=your_master_password \ - DatabaseSchema=syncbot_test \ - SlackClientID=your_slack_app_client_id \ - SlackSigningSecret=... \ - SlackClientSecret=... - ``` - Omit **ExistingDatabaseHost** (or leave it empty) to have the stack create a new RDS instance; then you must pass **DatabaseUser** and **DatabasePassword** for the new RDS master. - -5. **GitHub Actions:** - For **existing host** (deploy creates schema and app user), set **Variables**: - - **EXISTING_DATABASE_HOST** — Full RDS hostname. Leave **empty** to create a new RDS instead. - - **EXISTING_DATABASE_ADMIN_USER** — MySQL user that can create DBs/users (e.g. master). - - **DATABASE_SCHEMA** — Schema name (e.g. `syncbot_test` or `syncbot_prod`). - Set **Secrets**: - - **EXISTING_DATABASE_ADMIN_PASSWORD** — Password for the admin user. 
- For **new RDS** (stack creates the instance), set **DATABASE_USER**, **DATABASE_SCHEMA**, and secret **DATABASE_PASSWORD** instead, and leave **EXISTING_DATABASE_HOST** empty. The workflow passes all of these; the template uses the right set based on whether **EXISTING_DATABASE_HOST** is set. -**Disaster recovery:** if you must rebuild and keep existing encrypted tokens working, deploy with the old key: +Optional: `CreateOIDCProvider=false` if the GitHub OIDC provider already exists. + +**Outputs:** ```bash -sam deploy ... --parameter-overrides "... TokenEncryptionKeyOverride=<your-key>" +./infra/aws/scripts/print-bootstrap-outputs.sh ``` -If using GitHub Actions, set optional secret `TOKEN_ENCRYPTION_KEY_OVERRIDE`; the AWS workflow will pass it automatically. +Map **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketName** → `AWS_S3_BUCKET`, **BootstrapRegion** → `AWS_REGION`. -If a previous failed deploy already created `syncbot-<stage>-token-encryption-key`, you can also reuse that secret directly (instead of creating a new one) by passing: +### 2. Build and deploy the app stack ```bash -sam deploy ... --parameter-overrides "... ExistingTokenEncryptionKeySecretArn=<secret-arn>" +sam build -t infra/aws/template.yaml --use-container +sam deploy \ + -t .aws-sam/build/template.yaml \ + --stack-name syncbot-test \ + --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 \ + --parameter-overrides \ + Stage=test \ + SlackSigningSecret=... \ + SlackClientID=... \ + SlackClientSecret=... \ + SlackOauthBotScopes=... \ + SlackOauthUserScopes=... \ + DatabaseEngine=mysql \ + ... ``` ---- +Use **`sam deploy --guided`** the first time if you prefer prompts. For **existing RDS**, set `ExistingDatabaseHost`, `ExistingDatabaseAdminUser`, `ExistingDatabaseAdminPassword`, and for **private** DBs also `ExistingDatabaseNetworkMode=private`, `ExistingDatabaseSubnetIdsCsv`, `ExistingDatabaseLambdaSecurityGroupId`.
Omit `ExistingDatabaseHost` to create a **new** RDS in the stack. -### Download and Deploy (AWS, local) +**samconfig:** Predefined profiles in `samconfig.toml` (`test-new-rds`, `test-existing-rds`, etc.) — adjust placeholders before use. -1. Run [One-Time Bootstrap (AWS)](#one-time-bootstrap-aws-both-paths) and the [first app deploy](#fork-and-deploy-aws-github-ci) once with admin (or equivalent) credentials. -2. **Future deploys** with limited credentials: assume the bootstrap deploy role (recommended): +**Token key:** The stack can auto-generate `TOKEN_ENCRYPTION_KEY` in Secrets Manager. Back it up after first deploy. Optional: `TokenEncryptionKeyOverride`, `ExistingTokenEncryptionKeySecretArn` for recovery. - ```bash - export AWS_PROFILE=syncbot-deploy # profile with role_arn = GitHubDeployRoleArn - sam build -t infra/aws/template.yaml --use-container - sam deploy \ - -t .aws-sam/build/template.yaml \ - --stack-name syncbot-test \ - --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ - --capabilities CAPABILITY_IAM \ - --region us-east-2 - ``` +### 3. GitHub Actions (AWS) - Or use a dedicated IAM user with the same policy. See [Deployment Guide (legacy detail)](#sharing-infrastructure-across-apps-aws) for shared RDS and parameter overrides. +Workflow: `.github/workflows/deploy-aws.yml` (runs on push to `test`/`prod` when not using GCP). -#### Optional: `samconfig` deploy profiles +Configure **repository** variables: `AWS_ROLE_TO_ASSUME`, `AWS_S3_BUCKET`, `AWS_REGION`. 
-This repo includes pre-defined SAM config environments in `samconfig.toml` to reduce guided prompts: +Configure **per-environment** (`test` / `prod`) variables and secrets so they match your stack — especially if you use **existing RDS** or **private** networking: -- `test-new-rds` — test stack, creates new RDS -- `test-existing-rds` — test stack, uses existing RDS host/admin credentials -- `prod-new-rds` — prod stack, creates new RDS -- `prod-existing-rds` — prod stack, uses existing RDS host/admin credentials +| Type | Name | Notes | +|------|------|--------| +| Var | `AWS_STACK_NAME` | CloudFormation stack name | +| Var | `STAGE_NAME` | `test` or `prod` | +| Var | `DATABASE_SCHEMA` | e.g. `syncbot_test` | +| Var | `LOG_LEVEL` | Optional. `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`. Passed to SAM as `LogLevel`; defaults to `INFO` in the workflow when unset. | +| Var | `SLACK_CLIENT_ID` | From Slack app | +| Var | `DATABASE_ENGINE` | `mysql` or `postgresql` (workflow defaults to `mysql` if unset) | +| Var | `EXISTING_DATABASE_HOST` | Empty for **new** RDS in stack | +| Var | `EXISTING_DATABASE_ADMIN_USER` | When using existing host | +| Var | `EXISTING_DATABASE_NETWORK_MODE` | `public` or `private` | +| Var | `EXISTING_DATABASE_SUBNET_IDS_CSV` | **Private** mode: comma-separated subnet IDs (no spaces) | +| Var | `EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID` | **Private** mode: Lambda ENI security group | +| Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | +| Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | +| Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | -Examples: +The interactive deploy script can set these via `gh` when you opt in. Re-run that step after changing DB mode or engine so CI stays aligned. 
-```bash -sam build --config-env test-new-rds -sam deploy --config-env test-new-rds +**Dependency hygiene:** The workflow runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`. After changing `pyproject.toml`: -sam deploy --config-env test-existing-rds -sam deploy --config-env prod-existing-rds +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt ``` -For the `*-existing-rds` profiles, replace `REPLACE_ME_*` placeholders in `samconfig.toml` before deploy. -For disaster recovery (preserve token decryption), add `TokenEncryptionKeyOverride=` to that profile's `parameter_overrides`. +### 4. Ongoing local deploys (least privilege) + +Assume the bootstrap **GitHubDeployRole** (or equivalent) and run `sam build` / `sam deploy` as in step 2. --- -## GCP +## GCP — manual steps -### One-Time Bootstrap (GCP, both paths) +### 1. Terraform bootstrap -From the project root (or `infra/gcp`): +From `infra/gcp` (or repo root with paths adjusted): ```bash -cd infra/gcp terraform init terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" ``` -Set Secret Manager secret values for Slack and DB (see [infra/gcp/README.md](../infra/gcp/README.md)). `TOKEN_ENCRYPTION_KEY` is auto-generated once and stored in Secret Manager during apply. Set **cloud_run_image** after building and pushing your container image. Capture outputs for CI: **service_url**, **region**, **project_id**, **artifact_registry_repository**, **deploy_service_account_email**. - -**Disaster recovery:** if rebuilding and you need to preserve existing token decryption, re-apply with: +Set Secret Manager values for Slack/DB as in [infra/gcp/README.md](../infra/gcp/README.md). Set **`cloud_run_image`** after building and pushing the container. Capture outputs: service URL, region, project, Artifact Registry, deploy service account. 
```bash -terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" -var='token_encryption_key_override=<your-key>' +./infra/gcp/scripts/print-bootstrap-outputs.sh ``` -Helper script for GitHub vars: +**DR:** Optional `token_encryption_key_override` if you must preserve existing encrypted tokens. -```bash -./infra/gcp/scripts/print-bootstrap-outputs.sh -``` +### 2. GitHub Actions (GCP) ---- +1. Configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub → deploy service account. +2. Set **`DEPLOY_TARGET=gcp`** at repo level so `deploy-gcp.yml` runs and `deploy-aws.yml` is skipped. +3. Set variables: `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_WORKLOAD_IDENTITY_PROVIDER`, `GCP_SERVICE_ACCOUNT`, etc. + +**Note:** `.github/workflows/deploy-gcp.yml` may still contain **placeholder** steps in upstream; replace with real **build + push + Cloud Run deploy** (or `terraform apply` with a new image tag) in your fork. The guided `infra/gcp/scripts/deploy.sh` is the source of truth for an interactive path. -### Fork and Deploy (GCP, GitHub CI) +### 3. Ongoing deploys -1. Complete [One-Time Bootstrap (GCP)](#one-time-bootstrap-gcp-both-paths). -2. Configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub so the repo can impersonate the deploy service account without a key file. -3. In GitHub: set **Variables** (e.g. `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_WORKLOAD_IDENTITY_PROVIDER`, `GCP_SERVICE_ACCOUNT`). Set **DEPLOY_TARGET** = **gcp** so `.github/workflows/deploy-gcp.yml` runs and `deploy-aws.yml` is skipped. -4. Replace the placeholder steps in `deploy-gcp.yml` with real build (e.g. Cloud Build or Docker push to Artifact Registry) and `gcloud run deploy` (or Terraform apply). See `deploy-gcp.yml` comments and [infra/gcp/README.md](../infra/gcp/README.md). -5. Keep those changes inside your fork's infra/workflow files so future upstream rebases remain straightforward.
+Build and push an image to Artifact Registry, then `gcloud run deploy` or `terraform apply` with updated `cloud_run_image`. --- -### Download and Deploy (GCP, local) +## Using an existing RDS host (AWS) -1. Run [One-Time Bootstrap (GCP)](#one-time-bootstrap-gcp-both-paths). -2. Build and push the container image to the Terraform output **artifact_registry_repository**, then update the Cloud Run service: +When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource creates the schema and `syncbot_user_<stage>` with a generated app password in Secrets Manager. - ```bash - gcloud run deploy syncbot-test --image=REGION-docker.pkg.dev/PROJECT/REPO/syncbot:latest --region=REGION - ``` +- **Public:** Lambda is not in your VPC; RDS must be reachable on the Internet on port **3306** or **5432**. +- **Private:** Lambda uses `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`; DB security group must allow the Lambda SG; subnets need **NAT** egress for Slack API calls. - Or run `terraform apply` with an updated `cloud_run_image` variable. +See also [Sharing infrastructure across apps](#sharing-infrastructure-across-apps-aws) below. --- ## Swapping providers -To switch from AWS to GCP (or the other way) in a fork: - -1. **Keep app code and [INFRA_CONTRACT.md](INFRA_CONTRACT.md) unchanged.** Only infra and CI are provider-specific. -2. **Disable the old provider:** Remove or disable the workflow for the provider you are leaving (e.g. delete or disable `deploy-aws.yml` when moving to GCP). If using the same repo, set `DEPLOY_TARGET` accordingly. -3. **Use the new provider folder:** Run bootstrap for the new provider (`infra/aws/` or `infra/gcp/`) and configure GitHub vars/secrets (and WIF for GCP) as in the sections above. -4. **Point Slack** at the new **service_url** (and run DB migrations or attach an existing DB as required by the contract).
- -No changes are needed under `syncbot/` or to the deployment contract; only `infra//` and the chosen workflow change. +1. Keep [INFRA_CONTRACT.md](INFRA_CONTRACT.md) satisfied. +2. Disable the old provider’s workflow; set `DEPLOY_TARGET` if using GCP. +3. Bootstrap the new provider; reconfigure GitHub and Slack URLs. --- ## Helper scripts -| Provider | Script | Use | -|----------|--------|-----| -| AWS | `./infra/aws/scripts/print-bootstrap-outputs.sh` | Print bootstrap stack outputs and suggested GitHub variables (run from repo root). | -| AWS | `./infra/aws/scripts/deploy.sh` | Interactive local deploy helper (optional bootstrap, build, deploy, existing/new RDS prompts). | -| GCP | `./infra/gcp/scripts/print-bootstrap-outputs.sh` | Print Terraform outputs and suggested GitHub variables (run from repo root). | +| Script | Purpose | +|--------|---------| +| `infra/aws/scripts/print-bootstrap-outputs.sh` | Bootstrap stack outputs → suggested GitHub vars | +| `infra/aws/scripts/deploy.sh` | Interactive AWS deploy (see [What the deploy scripts do](#what-the-deploy-scripts-do)) | +| `infra/gcp/scripts/print-bootstrap-outputs.sh` | Terraform outputs → suggested GitHub vars | +| `infra/gcp/scripts/deploy.sh` | Interactive GCP deploy | --- ## Security summary -- **Bootstrap** runs once with elevated credentials; it creates a deploy identity (IAM role or GCP service account) and artifact storage (S3 bucket or Artifact Registry). -- **GitHub** uses short-lived federation only: **AWS** OIDC with `AWS_ROLE_TO_ASSUME`; **GCP** Workload Identity Federation with a deploy service account. No long-lived API keys in secrets for deploy. -- **Local** future deploys use assume-role (AWS) or the same deploy service account (GCP) with limited scope. -- **Prod** can be protected with GitHub environment **Required reviewers**. +- **Bootstrap** runs once with elevated credentials; creates deploy identity + artifact storage. 
+- **GitHub:** Short-lived **AWS OIDC** or **GCP WIF** — no long-lived cloud API keys in repos for deploy. +- **Prod:** Use GitHub environment protection rules as needed. --- -## Database schema (Alembic, fresh install only) - -Schema is managed by **Alembic** (see `db/alembic/`). On startup the app runs **`alembic upgrade head`** only (pre-release: fresh installs only; no stamping of existing DBs). +## Database schema (Alembic) -- **Fresh installs:** A new database (MySQL or SQLite) gets all tables from the baseline migration at first run. -- **Rollback:** If bootstrap fails, fix the migration issue, reset the DB file/schema, and rerun; no manual downgrade is required for the baseline. +Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upgrade head`** (pre-release: fresh installs). --- ## Sharing infrastructure across apps (AWS) -To use an existing RDS instance instead of creating one per stack, see **[Using an existing RDS host (AWS)](#using-an-existing-rds-host-aws)**. Set **ExistingDatabaseHost** and use a **different DatabaseSchema** per app or environment (e.g. `syncbot_test`, `syncbot_prod`). API Gateway and Lambda are per stack; free-tier quotas are account-wide. +Reuse one RDS with **different `DatabaseSchema`** per app/environment; set **ExistingDatabaseHost** and distinct schemas. API Gateway and Lambda remain per stack. diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md index d9802a7..c894242 100644 --- a/docs/IMPROVEMENTS.md +++ b/docs/IMPROVEMENTS.md @@ -103,7 +103,7 @@ This document outlines the improvements made to the SyncBot application and addi ### 15. 
Infrastructure as Code - **AWS SAM template** (`infra/aws/template.yaml`) defining VPC, RDS, Lambda, API Gateway (SAM artifact S3 used for deploy packaging only) -- **Free-tier optimized** (128 MB Lambda, db.t3.micro RDS, gp2 storage, no NAT Gateway) +- **Free-tier optimized** (128 MB Lambda, db.t4g.micro RDS, gp2 storage, no NAT Gateway) - **CI/CD pipeline** (`.github/workflows/sam-pipeline.yml`) for automated build/deploy - **SAM config** (`samconfig.toml`) for staging and production environments @@ -455,11 +455,11 @@ This document outlines the improvements made to the SyncBot application and addi - GCP Terraform variable: `token_encryption_key_override` - **Admin/operator warning surface** — deploy helper scripts and deployment docs now explicitly warn that losing the token key requires workspace reinstall/re-authorization. -### 49. PostgreSQL / Aurora DSQL Parallel Backend (Completed) -- **Runtime** — Added `DATABASE_BACKEND=postgresql` (default), `psycopg2` + `postgresql+psycopg2://` URLs, `DATABASE_PORT` (default 5432), PostgreSQL-safe `CREATE DATABASE`, table drop/reset, and TLS via `sslmode`/`sslrootcert`. -- **AWS SAM** — `DatabaseEngine` parameter (`postgresql` default, `mysql` legacy); split `RDSInstanceMysql` / `RDSInstancePostgres`; Lambda env sets `DATABASE_BACKEND`, `DATABASE_PORT`, and `DATABASE_HOST` accordingly. +### 49. PostgreSQL Parallel Backend (Completed) +- **Runtime** — Added `DATABASE_BACKEND=postgresql`, `psycopg2` + `postgresql+psycopg2://` URLs, `DATABASE_PORT` (default 5432), PostgreSQL-safe `CREATE DATABASE`, table drop/reset, and TLS via `sslmode`/`sslrootcert`. +- **AWS SAM** — `DatabaseEngine` parameter (MySQL default, PostgreSQL supported); split `RDSInstanceMysql` / `RDSInstancePostgres`; Lambda env sets `DATABASE_BACKEND`, `DATABASE_PORT`, and `DATABASE_HOST` accordingly. - **Custom resource** — `infra/aws/db_setup/handler.py` branches on `DatabaseEngine` for MySQL vs PostgreSQL user/database creation. 
-- **Deploy UX** — `./infra/aws/scripts/deploy.sh` defaults to PostgreSQL; **Advanced: legacy MySQL** toggles `DatabaseEngine=mysql`. `samconfig.toml` profiles pass `DatabaseEngine=postgresql` where applicable. +- **Deploy UX** — `./infra/aws/scripts/deploy.sh` supports both engines with a numbered choice prompt. - **Docs/tests** — `INFRA_CONTRACT.md`, `DEPLOYMENT.md`, `README.md`, `.env.example` updated; `tests/conftest.py` defaults tests to `mysql` for compatibility; added PostgreSQL pool/required-vars tests and `tests/test_db_setup.py`. ## Remaining Recommendations diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 6965ec0..ca77f0a 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -2,6 +2,8 @@ This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) must supply so SyncBot runs correctly. Forks can swap provider-specific IaC in `infra//` as long as they satisfy this contract. +**Deploy entrypoint:** From the repo root, `./deploy.sh` (macOS/Linux, or Git Bash/WSL bash) or `.\deploy.ps1` (Windows PowerShell — finds Git Bash or WSL, then bash) runs an interactive helper that delegates to `infra//scripts/deploy.sh`. That flow sets Cloud/Terraform resources and runtime env vars consistent with this document. Step-by-step and manual alternatives: [DEPLOYMENT.md](DEPLOYMENT.md). + **Pre-release:** This repo is pre-release. Database rollout assumes **fresh installs only** (no legacy schema migration or stamping). New databases are initialized via Alembic `upgrade head` at startup. ## Runtime Environment Variables @@ -27,7 +29,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | Variable | Description | |----------|-------------| -| `DATABASE_BACKEND` | `postgresql` (default), `mysql`, or `sqlite`. | +| `DATABASE_BACKEND` | `mysql` (default), `postgresql`, or `sqlite`. | | `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. 
**Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | | `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. | @@ -39,20 +41,23 @@ poetry export --only main --format requirements.txt --without-hashes --output sy **SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. -**PostgreSQL / Aurora DSQL (default):** Set `DATABASE_BACKEND=postgresql` (or rely on the default) and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. The AWS SAM template parameter `DatabaseEngine=postgresql` matches this backend. +**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or rely on the default) and either `DATABASE_URL` (`mysql+pymysql://...`) or the four host/user/password/schema vars. The AWS SAM template parameter `DatabaseEngine=mysql` (default) matches this backend. -**MySQL (legacy):** Set `DATABASE_BACKEND=mysql` and either `DATABASE_URL` (`mysql+pymysql://...`) or the four host/user/password/schema vars. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. +**PostgreSQL:** Set `DATABASE_BACKEND=postgresql` and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. 
### Required in production (non–local) | Variable | Description | |----------|-------------| | `SLACK_SIGNING_SECRET` | Slack request verification (Basic Information → App Credentials). | -| `ENV_SLACK_CLIENT_ID` | Slack OAuth client ID. | -| `ENV_SLACK_CLIENT_SECRET` | Slack OAuth client secret. | -| `ENV_SLACK_SCOPES` | Comma-separated OAuth scopes (see `.env.example`). | +| `SLACK_CLIENT_ID` | Slack OAuth client ID. | +| `SLACK_CLIENT_SECRET` | Slack OAuth client secret. | +| `SLACK_BOT_SCOPES` | Comma-separated OAuth **bot** scopes. Must match `slack-manifest.json` `oauth_config.scopes.bot` and `syncbot/slack_manifest_scopes.py` `BOT_SCOPES`. | +| `SLACK_USER_SCOPES` | Comma-separated OAuth **user** scopes. Must match `oauth_config.scopes.user` and `syncbot/slack_manifest_scopes.py` `USER_SCOPES`. If this env requests scopes that are not declared on the Slack app, install fails with `invalid_scope`. | | `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. | +**Reference wiring:** AWS SAM (`infra/aws/template.yaml`) uses CloudFormation parameters **`SlackOauthBotScopes`** and **`SlackOauthUserScopes`** (defaults match `BOT_SCOPES` / `USER_SCOPES`) to populate **`SLACK_BOT_SCOPES`** and **`SLACK_USER_SCOPES`**, and **`LogLevel`** (default `INFO`) → **`LOG_LEVEL`**. GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`; you set the secret **value** to the same comma-separated bot list) and **`slack_user_scopes`** (plain env → `SLACK_USER_SCOPES`, default matches SAM); **`log_level`** (default `INFO`) sets **`LOG_LEVEL`** on Cloud Run; see [infra/gcp/README.md](../infra/gcp/README.md). + ### Optional | Variable | Description | @@ -115,6 +120,7 @@ To use a different cloud or IaC stack: 1. Keep `syncbot/` and app behavior unchanged. 2. 
Add or replace contents of `infra/<provider>/` with templates/scripts that satisfy the contract above. + - To integrate with the repo-level launcher (`./deploy.sh` and `.\deploy.ps1`), provide `infra/<provider>/scripts/deploy.sh` only. On Windows, `deploy.ps1` invokes that bash script via Git Bash or WSL; do not add a separate `deploy.ps1` under `infra/`. 3. Point CI (e.g. `.github/workflows/deploy-<provider>.yml`) at the new infra paths and provider-specific auth (OIDC, WIF, etc.). 4. Update [DEPLOYMENT.md](DEPLOYMENT.md) (or provider-specific README under `infra/<provider>/`) with bootstrap and deploy steps that emit the bootstrap output contract. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 7634d94..d6f595f 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -1,5 +1,7 @@ # SyncBot User Guide +This guide is for **workspace admins and end users** configuring SyncBot in Slack. For **installing or hosting** the app (AWS, GCP, Docker, GitHub Actions), see **[DEPLOYMENT.md](DEPLOYMENT.md)** and the root **[README](../README.md)**. + ## Getting Started 1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py index c0d078a..f72a328 100644 --- a/infra/aws/db_setup/handler.py +++ b/infra/aws/db_setup/handler.py @@ -1,13 +1,16 @@ """ -Custom CloudFormation resource: create database and app user on an existing RDS host. -Supports MySQL (port 3306) and PostgreSQL / Aurora DSQL (port 5432). Uses bootstrap -credentials to create the database and a dedicated app user; the app password is read -from the generated Secrets Manager secret. +Custom CloudFormation resource: create database and app user for SyncBot. +Supports MySQL (port 3306) and PostgreSQL (port 5432). It can use: +- explicit admin password (existing-host mode), or +- admin password fetched from an admin secret ARN (new-RDS mode).
""" import json import re +import base64 +import time +import socket import boto3 import psycopg2 @@ -15,6 +18,12 @@ from psycopg2 import sql as psql from pymysql.cursors import DictCursor +DB_CONNECT_TIMEOUT_SECONDS = 5 +DB_CONNECT_ATTEMPTS = 6 +DB_CONNECT_RETRY_SECONDS = 2 +POSTGRES_DB_CONNECT_ATTEMPTS = 5 +POSTGRES_DB_CONNECT_RETRY_SECONDS = 1 + # CloudFormation custom resource response helper (no cfnresponse in Lambda by default for Python 3) def send(event, context, status, data=None, reason=None, physical_resource_id=None): @@ -75,10 +84,11 @@ def _handler_impl(event, context): host = props.get("Host", "").strip() admin_user = (props.get("AdminUser") or "").strip() admin_password = props.get("AdminPassword") or "" + admin_secret_arn = (props.get("AdminSecretArn") or "").strip() schema = (props.get("Schema") or "syncbot").strip() stage = (props.get("Stage") or "test").strip() secret_arn = (props.get("SecretArn") or "").strip() - database_engine = (props.get("DatabaseEngine") or "postgresql").strip().lower() + database_engine = (props.get("DatabaseEngine") or "mysql").strip().lower() if request_type == "Delete": # Must return the same PhysicalResourceId as Create; never use a placeholder. 
@@ -86,23 +96,40 @@ def _handler_impl(event, context): send(event, context, "SUCCESS", {"Username": ""}, physical_resource_id=delete_pid) return - if not all([host, admin_user, admin_password, schema, stage, secret_arn]): + if not all([host, admin_user, schema, stage, secret_arn]): + send( + event, + context, + "FAILED", + reason="Missing Host, AdminUser, Schema, Stage, or SecretArn", + ) + return + if not admin_password and not admin_secret_arn: send( event, context, "FAILED", - reason="Missing Host, AdminUser, AdminPassword, Schema, Stage, or SecretArn", + reason="Missing admin credentials: set AdminPassword or AdminSecretArn", ) return - app_username = f"syncbot_{stage}".replace("-", "_") + app_username = f"syncbot_user_{stage}".replace("-", "_") try: - app_password = get_app_password(secret_arn) + app_password = get_secret_value(secret_arn) except Exception as e: send(event, context, "FAILED", reason=f"GetSecretValue failed: {e}") return + if not admin_password: + try: + # RDS-managed master-user secrets store JSON; extract the password field. + admin_password = get_secret_value(admin_secret_arn, json_key="password") + except Exception as e: + send(event, context, "FAILED", reason=f"Get admin secret failed: {e}") + return try: + # Fail fast on obvious network connectivity issues before opening DB client sessions. 
+ _assert_tcp_reachable(host, 3306 if database_engine == "mysql" else 5432) if database_engine == "mysql": setup_database_mysql( host=host, @@ -129,10 +156,47 @@ def _handler_impl(event, context): return {"Username": app_username} -def get_app_password(secret_arn: str) -> str: +def _assert_tcp_reachable(host: str, port: int) -> None: + last_exc = None + for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(DB_CONNECT_TIMEOUT_SECONDS) + try: + sock.connect((host, port)) + return + except Exception as exc: + last_exc = exc + time.sleep(DB_CONNECT_RETRY_SECONDS) + finally: + sock.close() + raise RuntimeError( + f"Cannot reach {host}:{port} over TCP after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" + ) + + +def get_secret_value(secret_arn: str, json_key: str | None = None) -> str: client = boto3.client("secretsmanager") resp = client.get_secret_value(SecretId=secret_arn) - return (resp.get("SecretString") or "").strip() + secret_string = resp.get("SecretString") + if secret_string is None: + secret_binary = resp.get("SecretBinary") + if secret_binary is not None: + secret_string = base64.b64decode(secret_binary).decode("utf-8") + secret_string = (secret_string or "").strip() + if not secret_string: + raise ValueError(f"Secret {secret_arn} is empty") + + if json_key: + try: + payload = json.loads(secret_string) + except json.JSONDecodeError as exc: + raise ValueError(f"Secret {secret_arn} is not JSON; cannot read key '{json_key}'") from exc + value = (payload.get(json_key) or "").strip() if isinstance(payload, dict) else "" + if not value: + raise ValueError(f"Secret {secret_arn} missing key '{json_key}'") + return value + + return secret_string def setup_database_mysql( @@ -144,23 +208,37 @@ def setup_database_mysql( app_username: str, app_password: str, ) -> None: - conn = pymysql.connect( - host=host, - user=admin_user, - password=admin_password, - port=3306, - charset="utf8mb4", - 
cursorclass=DictCursor, - connect_timeout=15, - ) + safe_schema = _safe_ident(schema) + _safe_ident(app_username) + conn = None + last_exc = None + for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): + try: + conn = pymysql.connect( + host=host, + user=admin_user, + password=admin_password, + port=3306, + charset="utf8mb4", + cursorclass=DictCursor, + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + ) + break + except Exception as exc: + last_exc = exc + time.sleep(DB_CONNECT_RETRY_SECONDS) + if conn is None: + raise RuntimeError( + f"MySQL connect failed after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" + ) try: with conn.cursor() as cur: - cur.execute(f"CREATE DATABASE IF NOT EXISTS `{schema}`") + cur.execute(f"CREATE DATABASE IF NOT EXISTS `{safe_schema}`") cur.execute( "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", (app_username, app_password), ) - cur.execute(f"GRANT ALL PRIVILEGES ON `{schema}`.* TO %s@'%%'", (app_username,)) + cur.execute(f"GRANT ALL PRIVILEGES ON `{safe_schema}`.* TO %s@'%%'", (app_username,)) cur.execute("FLUSH PRIVILEGES") conn.commit() finally: @@ -176,6 +254,8 @@ def setup_database_postgresql( app_username: str, app_password: str, ) -> None: + max_db_connect_attempts = POSTGRES_DB_CONNECT_ATTEMPTS + db_connect_retry_seconds = POSTGRES_DB_CONNECT_RETRY_SECONDS _safe_ident(schema) _safe_ident(app_username) conn = psycopg2.connect( @@ -184,7 +264,7 @@ def setup_database_postgresql( password=admin_password, port=5432, dbname="postgres", - connect_timeout=15, + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, sslmode="require", ) conn.autocommit = True @@ -212,3 +292,42 @@ def setup_database_postgresql( ) finally: conn.close() + + # Ensure runtime role can connect and run migrations in the target DB. + # After CREATE DATABASE, RDS can take a short time before accepting connections. 
+ last_exc = None + for _attempt in range(1, max_db_connect_attempts + 1): + try: + db_conn = psycopg2.connect( + host=host, + user=admin_user, + password=admin_password, + port=5432, + dbname=schema, + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + sslmode="require", + ) + db_conn.autocommit = True + try: + with db_conn.cursor() as cur: + cur.execute( + psql.SQL("GRANT CONNECT, TEMP ON DATABASE {db} TO {user}").format( + db=psql.Identifier(schema), + user=psql.Identifier(app_username), + ) + ) + cur.execute( + psql.SQL("GRANT USAGE, CREATE ON SCHEMA public TO {user}").format( + user=psql.Identifier(app_username), + ) + ) + finally: + db_conn.close() + return + except Exception as exc: + last_exc = exc + time.sleep(db_connect_retry_seconds) + raise RuntimeError( + f"Failed connecting to newly created database '{schema}' after " + f"{max_db_connect_attempts} attempts: {last_exc}" + ) diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt index f4d7528..297cdf7 100644 --- a/infra/aws/db_setup/requirements.txt +++ b/infra/aws/db_setup/requirements.txt @@ -1,2 +1,4 @@ pymysql==1.1.2 psycopg2-binary==2.9.11 +# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (pip-audit / sam build). 
+cryptography==44.0.3 diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index b7613bb..bb5bc70 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -4,6 +4,15 @@ # # Run from repo root: # ./infra/aws/scripts/deploy.sh +# +# Phases (main path, after functions are defined below): +# 1) Prerequisites: CLI checks, template paths +# 2) Bootstrap: CloudFormation bootstrap stack and S3 artifact bucket (if missing) +# 3) App stack: region, stage, target stack name; detect existing stack for update +# 4) Database: source mode (stack RDS vs external host), engine, schema, existing-DB networking +# 5) Slack: signing secret, client secret, client ID +# 6) Confirm deploy summary, SAM build + deploy +# 7) After deploy: stage manifest, optional Slack API update, optional GitHub vars, deploy receipt set -euo pipefail @@ -12,13 +21,20 @@ REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" BOOTSTRAP_TEMPLATE="$REPO_ROOT/infra/aws/template.bootstrap.yaml" APP_TEMPLATE="$REPO_ROOT/infra/aws/template.yaml" +SLACK_MANIFEST_GENERATED_PATH="" +APP_DB_PASSWORD_OVERRIDE="${APP_DB_PASSWORD_OVERRIDE:-}" +APP_DB_PASSWORD_REUSED_FROM_SECRET="" +SLACK_SIGNING_SECRET_SOURCE="" +SLACK_CLIENT_SECRET_SOURCE="" +EXISTING_DB_ADMIN_PASSWORD_SOURCE="" +# Populated before write_deploy_receipt: backup summary + markdown receipt (deploy-receipts/*.md). +RECEIPT_TOKEN_SECRET_ID="" +RECEIPT_TOKEN_SECRET_VALUE="" +RECEIPT_APP_DB_SECRET_NAME="" +RECEIPT_APP_DB_SECRET_VALUE="" -require_cmd() { - if ! command -v "$1" >/dev/null 2>&1; then - echo "Error: required command '$1' not found in PATH." >&2 - exit 1 - fi -} +# shellcheck source=/dev/null +source "$REPO_ROOT/deploy.sh" prompt_default() { local prompt="$1" @@ -40,6 +56,49 @@ prompt_secret() { echo "$value" } +prompt_required() { + local prompt="$1" + local value + while true; do + read -r -p "$prompt: " value + if [[ -n "$value" ]]; then + echo "$value" + return 0 + fi + echo "Error: $prompt is required." 
>&2 + done +} + +prompt_secret_required() { + local prompt="$1" + local value + while true; do + value="$(prompt_secret "$prompt")" + if [[ -n "$value" ]]; then + echo "$value" + return 0 + fi + echo "Error: $prompt is required." >&2 + done +} + +required_from_env_or_prompt() { + local env_name="$1" + local prompt="$2" + local mode="${3:-plain}" # plain|secret + local env_value="${!env_name:-}" + if [[ -n "$env_value" ]]; then + echo "Using $prompt from environment variable $env_name." >&2 + echo "$env_value" + return 0 + fi + if [[ "$mode" == "secret" ]]; then + prompt_secret_required "$prompt" + else + prompt_required "$prompt" + fi +} + prompt_yes_no() { local prompt="$1" local default="${2:-y}" @@ -53,6 +112,186 @@ prompt_yes_no() { [[ "$answer" =~ ^[Yy]$ ]] } +ensure_aws_authenticated() { + local profile active_profile sso_start_url sso_region + profile="${AWS_PROFILE:-}" + active_profile="$profile" + if [[ -z "$active_profile" ]]; then + active_profile="$(aws configure get profile 2>/dev/null || true)" + [[ -z "$active_profile" ]] && active_profile="default" + fi + + if aws sts get-caller-identity >/dev/null 2>&1; then + return 0 + fi + + sso_start_url="$(aws configure get sso_start_url --profile "$active_profile" 2>/dev/null || true)" + sso_region="$(aws configure get sso_region --profile "$active_profile" 2>/dev/null || true)" + + echo "AWS CLI is not authenticated." + if [[ -n "$sso_start_url" && -n "$sso_region" ]]; then + if prompt_yes_no "Run 'aws sso login --profile $active_profile' now?" "y"; then + aws sso login --profile "$active_profile" || true + fi + else + echo "No complete SSO config found for profile '$active_profile'." + # Prefer the user's default interactive AWS login flow when available. + if aws login help >/dev/null 2>&1; then + if prompt_yes_no "Run 'aws login' now?" "y"; then + aws login || true + fi + fi + + if ! 
aws sts get-caller-identity >/dev/null 2>&1; then + if prompt_yes_no "Run 'aws configure sso --profile $active_profile' now?" "n"; then + aws configure sso --profile "$active_profile" || true + if prompt_yes_no "Run 'aws sso login --profile $active_profile' now?" "y"; then + aws sso login --profile "$active_profile" || true + fi + else + echo "Tip: use 'aws configure' if you authenticate with access keys." + fi + fi + fi + + if ! aws sts get-caller-identity >/dev/null 2>&1; then + echo "Unable to authenticate AWS CLI." + echo "Run one of the following, then rerun deploy:" + echo " aws login" + echo " aws configure sso [--profile ]" + echo " aws sso login [--profile ]" + echo " aws configure" + exit 1 + fi +} + +ensure_gh_authenticated() { + if ! command -v gh >/dev/null 2>&1; then + prereqs_hint_gh_cli >&2 + return 1 + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh CLI is not authenticated." + if prompt_yes_no "Run 'gh auth login' now?" "y"; then + gh auth login || true + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh authentication is still missing. Skipping automatic GitHub setup." 
+ return 1 +} + +slack_manifest_json_compact() { + local manifest_file="$1" + python3 - "$manifest_file" <<'PY' +import json +import sys +path = sys.argv[1] +with open(path, "r", encoding="utf-8") as f: + data = json.load(f) +print(json.dumps(data, separators=(",", ":"))) +PY +} + +slack_api_configure_from_manifest() { + local manifest_file="$1" + local install_url="$2" + local token app_id team_id manifest_json api_resp ok + + echo + echo "=== Slack App API ===" + + token="$(required_from_env_or_prompt "SLACK_API_TOKEN" "Slack API token (required scopes: apps.manifest:write)" "secret")" + app_id="$(prompt_default "Slack App ID (optional; blank = create new app)" "${SLACK_APP_ID:-}")" + team_id="$(prompt_default "Slack Team ID (optional; usually blank)" "${SLACK_TEAM_ID:-}")" + + manifest_json="$(slack_manifest_json_compact "$manifest_file" 2>/dev/null || true)" + if [[ -z "$manifest_json" ]]; then + echo "Could not parse manifest JSON automatically." + echo "Ensure $manifest_file is valid JSON and Python 3 is installed." 
+ return 0 + fi + + if [[ -n "$app_id" ]]; then + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +print("ok" if data.get("ok") else f"error:{data.get('error','unknown_error')}") +PY +)" + if [[ "$ok" == "ok" ]]; then + echo "Slack app manifest updated for App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API update failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.update" + fi + return 0 + fi + + # No App ID supplied: create a new Slack app from manifest. 
+ if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +if not data.get("ok"): + print(f"error:{data.get('error','unknown_error')}") + sys.exit(0) +app_id = data.get("app_id") or (data.get("app", {}) or {}).get("id") or "" +print(f"ok:{app_id}") +PY +)" + if [[ "$ok" == ok:* ]]; then + app_id="${ok#ok:}" + echo "Slack app created successfully." + [[ -n "$app_id" ]] && echo "New Slack App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API create failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.create" + fi +} + bootstrap_describe_outputs() { local stack_name="$1" local region="$2" @@ -63,12 +302,194 @@ bootstrap_describe_outputs() { --region "$region" 2>/dev/null || true } +app_describe_outputs() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$region" 2>/dev/null || true +} + output_value() { local outputs="$1" local key="$2" echo "$outputs" | awk -F'\t' -v k="$key" '$1==k {print $2}' } +configure_github_actions_aws() { + # $1 Bootstrap stack outputs (tab-separated OutputKey / OutputValue) + # $2 AWS region for this deploy session (fallback if bootstrap has no BootstrapRegion output) + # $3 App CloudFormation 
stack name + # $4 Stage name (test|prod) — GitHub environment name + # $5 Database schema name + # $6 DB source mode: 1 = stack-managed RDS, 2 = external existing host (matches SAM / prompts) + # $7 Existing DB host (mode 2) + # $8 Existing DB admin user (mode 2) + # $9 Existing DB admin password (mode 2) + # $10 Existing DB network mode: public | private + # $11 Comma-separated subnet IDs for Lambda in private mode + # $12 Lambda ENI security group id in private mode + # $13 Database engine: mysql | postgresql + local bootstrap_outputs="$1" + local aws_region="$2" + local app_stack_name="$3" + local deploy_stage="$4" + local database_schema="$5" + local db_mode="$6" + local existing_db_host="$7" + local existing_db_admin_user="$8" + local existing_db_admin_password="$9" + local existing_db_network_mode="${10:-}" + [[ -z "$existing_db_network_mode" ]] && existing_db_network_mode="public" + local existing_db_subnet_ids_csv="${11:-}" + local existing_db_lambda_sg_id="${12:-}" + local database_engine="${13:-}" + [[ -z "$database_engine" ]] && database_engine="mysql" + local role bucket boot_region + role="$(output_value "$bootstrap_outputs" "GitHubDeployRoleArn")" + bucket="$(output_value "$bootstrap_outputs" "DeploymentBucketName")" + boot_region="$(output_value "$bootstrap_outputs" "BootstrapRegion")" + [[ -z "$boot_region" ]] && boot_region="$aws_region" + local repo env_name + env_name="$deploy_stage" + + echo + echo "=== GitHub Actions (AWS) ===" + echo "Detected bootstrap role: $role" + echo "Detected deploy bucket: $bucket" + echo "Detected bootstrap region: $boot_region" + repo="$(gh repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" + if [[ -z "$repo" ]]; then + repo="$(prompt_required "GitHub repository (owner/repo) for environment setup")" + else + echo "Detected GitHub repository: $repo" + fi + + if ! 
ensure_gh_authenticated; then + echo + echo "Set these GitHub Actions Variables manually:" + echo " AWS_ROLE_TO_ASSUME = $role" + echo " AWS_S3_BUCKET = $bucket" + echo " AWS_REGION = $boot_region" + echo "For environment '$env_name' also set AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DATABASE_ENGINE," + echo "and (if using existing RDS) EXISTING_DATABASE_* / private VPC vars — see docs/DEPLOYMENT.md." + return 0 + fi + + if prompt_yes_no "Create/update GitHub environments 'test' and 'prod' now?" "y"; then + gh api -X PUT "repos/$repo/environments/test" >/dev/null + gh api -X PUT "repos/$repo/environments/prod" >/dev/null + echo "GitHub environments ensured: test, prod." + fi + + if prompt_yes_no "Set repo variables with gh now (AWS_ROLE_TO_ASSUME, AWS_S3_BUCKET, AWS_REGION)?" "y"; then + [[ -n "$role" ]] && gh variable set AWS_ROLE_TO_ASSUME --body "$role" + [[ -n "$bucket" ]] && gh variable set AWS_S3_BUCKET --body "$bucket" + [[ -n "$boot_region" ]] && gh variable set AWS_REGION --body "$boot_region" + echo "GitHub repository variables updated." + fi + + if prompt_yes_no "Set environment variables for '$env_name' now (AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DB host/user vars)?" 
"y"; then + gh variable set AWS_STACK_NAME --env "$env_name" --body "$app_stack_name" + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" + gh variable set DATABASE_SCHEMA --env "$env_name" --body "$database_schema" + gh variable set DATABASE_ENGINE --env "$env_name" --body "$database_engine" + if [[ "$db_mode" == "2" ]]; then + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "$existing_db_host" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "$existing_db_admin_user" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "$existing_db_network_mode" + if [[ "$existing_db_network_mode" == "private" ]]; then + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "$existing_db_subnet_ids_csv" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "$existing_db_lambda_sg_id" + else + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" + fi + else + # Clear existing-host vars for new-RDS mode to avoid stale CI config. + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "public" + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" + fi + echo "Environment variables updated for '$env_name'." + fi + + if prompt_yes_no "Set environment secrets for '$env_name' now (Slack secrets + optional Existing DB admin password)?" 
"n"; then + if [[ -z "${SLACK_SIGNING_SECRET:-}" ]]; then + SLACK_SIGNING_SECRET_SOURCE="prompt" + SLACK_SIGNING_SECRET="$(required_from_env_or_prompt "SLACK_SIGNING_SECRET" "SlackSigningSecret" "secret")" + fi + if [[ -z "${SLACK_CLIENT_SECRET:-}" ]]; then + SLACK_CLIENT_SECRET_SOURCE="prompt" + SLACK_CLIENT_SECRET="$(required_from_env_or_prompt "SLACK_CLIENT_SECRET" "SlackClientSecret" "secret")" + fi + gh secret set SLACK_SIGNING_SECRET --env "$env_name" --body "$SLACK_SIGNING_SECRET" + gh secret set SLACK_CLIENT_SECRET --env "$env_name" --body "$SLACK_CLIENT_SECRET" + if [[ "$db_mode" == "2" && -n "$existing_db_admin_password" ]]; then + gh secret set EXISTING_DATABASE_ADMIN_PASSWORD --env "$env_name" --body "$existing_db_admin_password" + fi + echo "Environment secrets updated for '$env_name'." + fi +} + +generate_stage_slack_manifest() { + local stage="$1" + local api_url="$2" + local install_url="$3" + local template="$REPO_ROOT/slack-manifest.json" + local manifest_out="$REPO_ROOT/slack-manifest_${stage}.json" + local events_url base_url oauth_redirect_url + + if [[ ! -f "$template" ]]; then + echo "Slack manifest template not found at $template" + return 0 + fi + if [[ -z "$api_url" ]]; then + echo "Could not determine API URL from stack outputs. Skipping Slack manifest generation." + return 0 + fi + + events_url="${api_url%/}" + base_url="${events_url%/slack/events}" + oauth_redirect_url="${base_url}/slack/oauth_redirect" + + if ! 
python3 - "$template" "$manifest_out" "$events_url" "$oauth_redirect_url" <<'PY' +import json +import sys + +template_path, out_path, events_url, redirect_url = sys.argv[1:5] +with open(template_path, "r", encoding="utf-8") as f: + manifest = json.load(f) + +manifest.setdefault("oauth_config", {}).setdefault("redirect_urls", []) +manifest["oauth_config"]["redirect_urls"] = [redirect_url] +manifest.setdefault("settings", {}).setdefault("event_subscriptions", {}) +manifest["settings"]["event_subscriptions"]["request_url"] = events_url +manifest.setdefault("settings", {}).setdefault("interactivity", {}) +manifest["settings"]["interactivity"]["request_url"] = events_url + +with open(out_path, "w", encoding="utf-8") as f: + json.dump(manifest, f, indent=2) + f.write("\n") +PY + then + echo "Failed to generate stage Slack manifest from JSON template." + return 0 + fi + + SLACK_MANIFEST_GENERATED_PATH="$manifest_out" + + echo "=== Slack Manifest (${stage}) ===" + echo "Saved file: $manifest_out" + echo "Install URL: $install_url" + echo + sed 's/^/ /' "$manifest_out" +} + secret_arn_by_name() { local secret_name="$1" local region="$2" @@ -79,6 +500,172 @@ secret_arn_by_name() { --output text 2>/dev/null || true } +secret_value_by_id() { + local secret_id="$1" + local region="$2" + aws secretsmanager get-secret-value \ + --secret-id "$secret_id" \ + --region "$region" \ + --query 'SecretString' \ + --output text 2>/dev/null || true +} + +rds_lookup_admin_defaults() { + local db_host="$1" + local region="$2" + aws rds describe-db-instances \ + --region "$region" \ + --query "DBInstances[?Endpoint.Address=='$db_host']|[0].[MasterUsername,MasterUserSecret.SecretArn]" \ + --output text 2>/dev/null || true +} + +secret_password_by_id() { + local secret_id="$1" + local region="$2" + local raw + raw="$(secret_value_by_id "$secret_id" "$region")" + if [[ -z "$raw" || "$raw" == "None" ]]; then + return 1 + fi + python3 - "$raw" <<'PY' +import json +import sys + +raw = sys.argv[1] 
+if not raw or raw == "None": + print("") + raise SystemExit(0) + +try: + data = json.loads(raw) +except Exception: + print(raw) + raise SystemExit(0) + +if isinstance(data, dict): + password = data.get("password") + if isinstance(password, str) and password: + print(password) + else: + print("") +else: + print("") +PY +} + +wait_for_secret_deleted() { + local secret_id="$1" + local region="$2" + local max_attempts="${3:-20}" + local sleep_seconds="${4:-3}" + local attempt + for ((attempt = 1; attempt <= max_attempts; attempt++)); do + if ! aws secretsmanager describe-secret --secret-id "$secret_id" --region "$region" >/dev/null 2>&1; then + return 0 + fi + sleep "$sleep_seconds" + done + return 1 +} + +handle_orphan_app_db_secret_on_create() { + local stack_status="$1" + local secret_name="$2" + local region="$3" + local secret_arn reuse_value + + # Only needed for brand-new stack creates where a previous failed stack left the named secret. + if [[ -n "$stack_status" && "$stack_status" != "None" ]]; then + return 0 + fi + + secret_arn="$(secret_arn_by_name "$secret_name" "$region")" + if [[ -z "$secret_arn" || "$secret_arn" == "None" ]]; then + return 0 + fi + + echo "Detected existing app DB secret: $secret_name" + if [[ -z "$APP_DB_PASSWORD_OVERRIDE" ]]; then + if prompt_yes_no "Reuse existing app DB password value when recreating this secret?" "y"; then + reuse_value="$(secret_password_by_id "$secret_arn" "$region" 2>/dev/null || true)" + if [[ -n "$reuse_value" && "$reuse_value" != "None" ]]; then + APP_DB_PASSWORD_OVERRIDE="$reuse_value" + APP_DB_PASSWORD_REUSED_FROM_SECRET="$secret_name" + echo "Will reuse existing app DB password value." + else + echo "Could not read existing app DB secret value; deploy will create a new app DB password." + fi + fi + else + echo "Using provided AppDbPasswordOverride for secret recreation." + [[ -z "$APP_DB_PASSWORD_REUSED_FROM_SECRET" ]] && APP_DB_PASSWORD_REUSED_FROM_SECRET="provided-override" + fi + + if ! 
prompt_yes_no "Delete detected secret now so create can continue?" "y"; then + echo "Cannot create new stack while this secret name already exists." >&2 + echo "Delete it manually or choose a different stage/stack." >&2 + exit 1 + fi + + if ! aws secretsmanager delete-secret \ + --secret-id "$secret_arn" \ + --region "$region" \ + --force-delete-without-recovery >/dev/null 2>&1; then + echo "Failed to delete secret '$secret_name'. Check IAM permissions and retry." >&2 + exit 1 + fi + + echo "Deleted secret '$secret_name'. Waiting for name to become available..." + if ! wait_for_secret_deleted "$secret_arn" "$region"; then + echo "Secret deletion is still propagating. Wait a minute and rerun deploy." >&2 + exit 1 + fi +} + +write_deploy_receipt() { + local provider="$1" + local stage="$2" + local project_or_stack="$3" + local region="$4" + local service_url="$5" + local install_url="$6" + local manifest_path="$7" + local ts_human ts_file receipt_dir receipt_path + + ts_human="$(date -u +"%Y-%m-%d %H:%M:%S UTC")" + ts_file="$(date -u +"%Y%m%dT%H%M%SZ")" + receipt_dir="$REPO_ROOT/deploy-receipts" + receipt_path="$receipt_dir/deploy-${provider}-${stage}-${ts_file}.md" + + mkdir -p "$receipt_dir" + cat >"$receipt_path" </dev/null || true } +ec2_subnet_vpc_ids() { + local region="$1" + shift + aws ec2 describe-subnets \ + --region "$region" \ + --subnet-ids "$@" \ + --query 'Subnets[*].[SubnetId,VpcId]' \ + --output text 2>/dev/null || true +} + +ec2_vpc_subnet_ids() { + local vpc_id="$1" + local region="$2" + aws ec2 describe-subnets \ + --region "$region" \ + --filters "Name=vpc-id,Values=$vpc_id" \ + --query 'Subnets[].SubnetId' \ + --output text 2>/dev/null || true +} + +ec2_security_group_vpc() { + local sg_id="$1" + local region="$2" + aws ec2 describe-security-groups \ + --region "$region" \ + --group-ids "$sg_id" \ + --query 'SecurityGroups[0].VpcId' \ + --output text 2>/dev/null || true +} + +ec2_sg_allows_from_sg_on_port() { + local db_sg_id="$1" + local 
source_sg_id="$2" + local port="$3" + local region="$4" + local allowed_groups + allowed_groups="$(aws ec2 describe-security-groups \ + --region "$region" \ + --group-ids "$db_sg_id" \ + --query "SecurityGroups[0].IpPermissions[?FromPort<=\`$port\` && ToPort>=\`$port\`].UserIdGroupPairs[].GroupId" \ + --output text 2>/dev/null || true)" + [[ " $allowed_groups " == *" $source_sg_id "* ]] +} + +ec2_subnet_route_table_id() { + local subnet_id="$1" + local vpc_id="$2" + local region="$3" + local rt_id + rt_id="$(aws ec2 describe-route-tables \ + --region "$region" \ + --filters "Name=association.subnet-id,Values=$subnet_id" \ + --query 'RouteTables[0].RouteTableId' \ + --output text 2>/dev/null || true)" + if [[ -z "$rt_id" || "$rt_id" == "None" ]]; then + rt_id="$(aws ec2 describe-route-tables \ + --region "$region" \ + --filters "Name=vpc-id,Values=$vpc_id" "Name=association.main,Values=true" \ + --query 'RouteTables[0].RouteTableId' \ + --output text 2>/dev/null || true)" + fi + echo "$rt_id" +} + +ec2_subnet_default_route_target() { + local subnet_id="$1" + local vpc_id="$2" + local region="$3" + local rt_id targets target + rt_id="$(ec2_subnet_route_table_id "$subnet_id" "$vpc_id" "$region")" + if [[ -z "$rt_id" || "$rt_id" == "None" ]]; then + echo "none" + return 0 + fi + + # Read all active default-route targets and pick the first concrete one. 
+ targets="$(aws ec2 describe-route-tables \ + --region "$region" \ + --route-table-ids "$rt_id" \ + --query "RouteTables[0].Routes[?DestinationCidrBlock=='0.0.0.0/0' && State=='active'].[NatGatewayId,GatewayId,TransitGatewayId,NetworkInterfaceId,VpcPeeringConnectionId]" \ + --output text 2>/dev/null || true)" + for target in $targets; do + [[ "$target" == "None" ]] && continue + echo "$target" + return 0 + done + + echo "none" +} + +discover_private_lambda_subnets_for_db_vpc() { + local vpc_id="$1" + local region="$2" + local subnet_ids subnet_id route_target out + subnet_ids="$(ec2_vpc_subnet_ids "$vpc_id" "$region")" + if [[ -z "$subnet_ids" || "$subnet_ids" == "None" ]]; then + echo "" + return 0 + fi + + out="" + for subnet_id in $subnet_ids; do + [[ -z "$subnet_id" ]] && continue + route_target="$(ec2_subnet_default_route_target "$subnet_id" "$vpc_id" "$region")" + # Lambda private-subnet candidates: active default route through NAT. + if [[ "$route_target" == nat-* ]]; then + if [[ -z "$out" ]]; then + out="$subnet_id" + else + out="$out,$subnet_id" + fi + fi + done + echo "$out" +} + +validate_private_existing_db_connectivity() { + local region="$1" + local engine="$2" + local subnet_csv="$3" + local lambda_sg="$4" + local db_vpc="$5" + local db_sgs_csv="$6" + local db_host="$7" + local db_port subnet_list subnet_vpcs first_vpc line subnet_id subnet_vpc db_sg_id lambda_sg_vpc db_sg_list route_target rt_id ingress_ok + local -a no_nat_subnets + + db_port="3306" + [[ "$engine" == "postgresql" ]] && db_port="5432" + + IFS=',' read -r -a subnet_list <<< "$subnet_csv" + if [[ "${#subnet_list[@]}" -lt 1 ]]; then + echo "Connectivity preflight failed: no subnet IDs provided for private mode." >&2 + return 1 + fi + + subnet_vpcs="$(ec2_subnet_vpc_ids "$region" "${subnet_list[@]}")" + if [[ -z "$subnet_vpcs" || "$subnet_vpcs" == "None" ]]; then + echo "Connectivity preflight failed: could not read VPC IDs for provided subnets." 
>&2 + return 1 + fi + + first_vpc="" + while IFS=$'\t' read -r subnet_id subnet_vpc; do + [[ -z "$subnet_id" || -z "$subnet_vpc" ]] && continue + if [[ -z "$first_vpc" ]]; then + first_vpc="$subnet_vpc" + elif [[ "$subnet_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: subnets span multiple VPCs." >&2 + return 1 + fi + done <<< "$subnet_vpcs" + + if [[ -z "$first_vpc" ]]; then + echo "Connectivity preflight failed: unable to determine subnet VPC." >&2 + return 1 + fi + + if [[ -n "$db_vpc" && "$db_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: Lambda subnets are in $first_vpc but DB is in $db_vpc." >&2 + return 1 + fi + + lambda_sg_vpc="$(ec2_security_group_vpc "$lambda_sg" "$region")" + if [[ -z "$lambda_sg_vpc" || "$lambda_sg_vpc" == "None" ]]; then + echo "Connectivity preflight failed: Lambda security group '$lambda_sg' was not found." >&2 + return 1 + fi + if [[ "$lambda_sg_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: Lambda security group is in $lambda_sg_vpc, expected $first_vpc." >&2 + return 1 + fi + + if [[ -n "$db_sgs_csv" ]]; then + ingress_ok="false" + IFS=',' read -r -a db_sg_list <<< "$db_sgs_csv" + for db_sg_id in "${db_sg_list[@]}"; do + db_sg_id="${db_sg_id// /}" + [[ -z "$db_sg_id" ]] && continue + if ec2_sg_allows_from_sg_on_port "$db_sg_id" "$lambda_sg" "$db_port" "$region"; then + echo "Connectivity preflight passed: DB SG $db_sg_id allows Lambda SG $lambda_sg on port $db_port." + ingress_ok="true" + break + fi + done + if [[ "$ingress_ok" != "true" ]]; then + echo "Connectivity preflight failed: none of the DB security groups allow Lambda SG $lambda_sg on port $db_port." >&2 + echo "Fix: add an inbound SG rule on the DB security group from '$lambda_sg' to TCP $db_port." >&2 + return 1 + fi + fi + + if [[ -z "$db_sgs_csv" ]]; then + echo "Connectivity preflight warning: DB SGs could not be auto-detected for host $db_host." 
>&2 + echo "Cannot verify ingress rule automatically; continuing with subnet/VPC checks only." >&2 + fi + + no_nat_subnets=() + for subnet_id in "${subnet_list[@]}"; do + subnet_id="${subnet_id// /}" + [[ -z "$subnet_id" ]] && continue + route_target="$(ec2_subnet_default_route_target "$subnet_id" "$first_vpc" "$region")" + if [[ "$route_target" != nat-* ]]; then + no_nat_subnets+=("$subnet_id:$route_target") + fi + done + + if [[ "${#no_nat_subnets[@]}" -gt 0 ]]; then + echo "Connectivity preflight failed: one or more selected private subnets do not have an active NAT default route." >&2 + for entry in "${no_nat_subnets[@]}"; do + subnet_id="${entry%%:*}" + route_target="${entry#*:}" + rt_id="$(ec2_subnet_route_table_id "$subnet_id" "$first_vpc" "$region")" + echo " - Subnet $subnet_id (route table $rt_id) default route target: $route_target" >&2 + done + echo "Fix before deploy:" >&2 + echo " 1) Use private subnets whose route table has 0.0.0.0/0 -> nat-xxxx" >&2 + echo " 2) Or update those route tables to point 0.0.0.0/0 to a NAT gateway" >&2 + echo " 3) Ensure DB SG allows Lambda SG '$lambda_sg' on TCP $db_port" >&2 + return 1 + fi + + echo "Connectivity preflight passed: private subnets have NAT egress." 
+ return 0 +} + stack_status() { local stack_name="$1" local region="$2" @@ -98,6 +911,22 @@ stack_status() { --output text 2>/dev/null || true } +stack_parameters() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --region "$region" \ + --query 'Stacks[0].Parameters[*].[ParameterKey,ParameterValue]' \ + --output text 2>/dev/null || true +} + +stack_param_value() { + local params="$1" + local key="$2" + echo "$params" | awk -F'\t' -v k="$key" '$1==k {print $2}' +} + print_recent_stack_failures() { local stack_name="$1" local region="$2" @@ -142,8 +971,13 @@ handle_unhealthy_stack_state() { esac } -require_cmd aws -require_cmd sam +prereqs_require_cmd aws prereqs_hint_aws_cli +prereqs_require_cmd sam prereqs_hint_sam_cli +prereqs_require_cmd docker prereqs_hint_docker +prereqs_require_cmd python3 prereqs_hint_python3 +prereqs_require_cmd curl prereqs_hint_curl + +prereqs_print_cli_status_matrix "AWS" aws sam docker python3 curl if [[ ! -f "$APP_TEMPLATE" ]]; then echo "Error: app template not found at $APP_TEMPLATE" >&2 @@ -154,11 +988,12 @@ if [[ ! -f "$BOOTSTRAP_TEMPLATE" ]]; then exit 1 fi -echo "=== SyncBot AWS Deploy Helper ===" +echo "=== SyncBot AWS Deploy ===" echo DEFAULT_REGION="${AWS_REGION:-us-east-2}" REGION="$(prompt_default "AWS region" "$DEFAULT_REGION")" +ensure_aws_authenticated BOOTSTRAP_STACK="$(prompt_default "Bootstrap stack name" "syncbot-bootstrap")" BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" @@ -170,6 +1005,7 @@ if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then CREATE_OIDC="$(prompt_default "Create OIDC provider (true/false)" "true")" BUCKET_PREFIX="$(prompt_default "Deployment bucket prefix" "syncbot-deploy")" echo + echo "=== Bootstrap Stack ===" echo "Deploying bootstrap stack..." 
aws cloudformation deploy \ --template-file "$BOOTSTRAP_TEMPLATE" \ @@ -208,42 +1044,223 @@ fi DEFAULT_STACK="$SUGGESTED_TEST_STACK" [[ "$STAGE" == "prod" ]] && DEFAULT_STACK="$SUGGESTED_PROD_STACK" STACK_NAME="$(prompt_default "App stack name" "$DEFAULT_STACK")" +EXISTING_STACK_STATUS="$(stack_status "$STACK_NAME" "$REGION")" +IS_STACK_UPDATE="false" +EXISTING_STACK_PARAMS="" +PREV_EXISTING_DATABASE_HOST="" +PREV_EXISTING_DATABASE_ADMIN_USER="" +PREV_EXISTING_DATABASE_NETWORK_MODE="" +PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="" +PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" +PREV_DATABASE_ENGINE="" +PREV_DATABASE_SCHEMA="" +PREV_LOG_LEVEL="" +PREV_DATABASE_HOST_IN_USE="" +PREV_STACK_USES_EXISTING_DB="false" +if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then + echo "Detected existing CloudFormation stack: $STACK_NAME ($EXISTING_STACK_STATUS)" + if ! prompt_yes_no "Continue and update this existing stack?" "y"; then + echo "Aborted." + exit 0 + fi + IS_STACK_UPDATE="true" + EXISTING_STACK_PARAMS="$(stack_parameters "$STACK_NAME" "$REGION")" + PREV_EXISTING_DATABASE_HOST="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseHost")" + PREV_EXISTING_DATABASE_ADMIN_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseAdminUser")" + PREV_EXISTING_DATABASE_NETWORK_MODE="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseNetworkMode")" + PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseSubnetIdsCsv")" + PREV_EXISTING_DATABASE_LAMBDA_SG_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseLambdaSecurityGroupId")" + PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" + PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" + PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" + EXISTING_STACK_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" + 
PREV_DATABASE_HOST_IN_USE="$(output_value "$EXISTING_STACK_OUTPUTS" "DatabaseHostInUse")" + if [[ -n "$PREV_EXISTING_DATABASE_HOST" ]]; then + PREV_STACK_USES_EXISTING_DB="true" + fi + if [[ -z "$PREV_EXISTING_DATABASE_HOST" && -n "$PREV_DATABASE_HOST_IN_USE" ]]; then + PREV_EXISTING_DATABASE_HOST="$PREV_DATABASE_HOST_IN_USE" + fi + + if prompt_yes_no "Skip infrastructure re-deploy and go directly to GitHub Actions setup?" "n"; then + # Same semantics as DB_MODE (1 = stack RDS, 2 = existing host) for GitHub env vars only. + GH_DB_MODE="1" + if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then + GH_DB_MODE="2" + fi + GH_DATABASE_SCHEMA="$PREV_DATABASE_SCHEMA" + [[ -z "$GH_DATABASE_SCHEMA" ]] && GH_DATABASE_SCHEMA="syncbot_${STAGE}" + + # Initialize optional globals used only when user opts into setting secrets in GitHub setup. + SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" + SLACK_CLIENT_SECRET="${SLACK_CLIENT_SECRET:-}" + + echo + echo "Skipping deploy. Opening GitHub Actions setup for existing stack..." + [[ -z "$PREV_DATABASE_ENGINE" ]] && PREV_DATABASE_ENGINE="mysql" + configure_github_actions_aws \ + "$BOOTSTRAP_OUTPUTS" \ + "$REGION" \ + "$STACK_NAME" \ + "$STAGE" \ + "$GH_DATABASE_SCHEMA" \ + "$GH_DB_MODE" \ + "$PREV_EXISTING_DATABASE_HOST" \ + "$PREV_EXISTING_DATABASE_ADMIN_USER" \ + "${EXISTING_DATABASE_ADMIN_PASSWORD:-}" \ + "$PREV_EXISTING_DATABASE_NETWORK_MODE" \ + "$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" \ + "$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" \ + "$PREV_DATABASE_ENGINE" + echo "Done. No infrastructure changes were deployed." + exit 0 + fi +fi echo -echo "Database mode:" -echo " 1) Create new RDS in stack (PostgreSQL by default)" -echo " 2) Use existing RDS / Aurora DSQL host (deploy creates DB and app user)" -DB_MODE="$(prompt_default "Choose 1 or 2" "1")" +echo "=== Database Source ===" +# DB_MODE / GH_DB_MODE: 1 = stack-managed RDS in this template; 2 = external existing RDS host. 
+DB_MODE_DEFAULT="1" +if [[ "$IS_STACK_UPDATE" == "true" ]]; then + if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then + EXISTING_DB_LABEL="$PREV_EXISTING_DATABASE_HOST" + [[ -z "$EXISTING_DB_LABEL" ]] && EXISTING_DB_LABEL="not set" + DB_MODE_DEFAULT="2" + echo " 1) Use stack-managed RDS" + echo " 2) Use external existing RDS host: $EXISTING_DB_LABEL (default)" + else + DB_MODE_DEFAULT="1" + echo " 1) Use stack-managed RDS (default)" + echo " 2) Use external existing RDS host" + fi +else + echo " 1) Use stack-managed RDS (default)" + echo " 2) Use external existing RDS host" +fi +DB_MODE="$(prompt_default "Choose database source (1 or 2)" "$DB_MODE_DEFAULT")" if [[ "$DB_MODE" != "1" && "$DB_MODE" != "2" ]]; then echo "Error: invalid database mode." >&2 exit 1 fi +if [[ "$IS_STACK_UPDATE" == "true" && "$PREV_STACK_USES_EXISTING_DB" != "true" && "$DB_MODE" == "2" ]]; then + echo + echo "Warning: switching from stack-managed RDS to existing external DB will remove stack-managed RDS/VPC resources." + if ! prompt_yes_no "Continue with this destructive migration?" "n"; then + echo "Keeping stack-managed RDS mode for this deploy." + DB_MODE="1" + fi +fi -DATABASE_ENGINE="postgresql" -if prompt_yes_no "Advanced: use legacy MySQL RDS instead of PostgreSQL (Aurora DSQL / RDS PG)?" "n"; then - DATABASE_ENGINE="mysql" +DATABASE_ENGINE="mysql" +DB_ENGINE_DEFAULT="1" +if [[ "$IS_STACK_UPDATE" == "true" && "$PREV_DATABASE_ENGINE" == "postgresql" ]]; then + DATABASE_ENGINE="postgresql" + DB_ENGINE_DEFAULT="2" +fi +echo +echo "=== Database Engine ===" +if [[ "$DB_ENGINE_DEFAULT" == "2" ]]; then + echo " 1) MySQL" + echo " 2) PostgreSQL (default; detected from current stack)" +else + echo " 1) MySQL (default)" + echo " 2) PostgreSQL" +fi +DB_ENGINE_MODE="$(prompt_default "Choose 1 or 2" "$DB_ENGINE_DEFAULT")" +if [[ "$DB_ENGINE_MODE" == "2" ]]; then + DATABASE_ENGINE="postgresql" +elif [[ "$DB_ENGINE_MODE" != "1" ]]; then + echo "Error: invalid database engine mode." 
>&2 + exit 1 fi echo -SLACK_SIGNING_SECRET="$(prompt_secret "SlackSigningSecret")" -SLACK_CLIENT_SECRET="$(prompt_secret "SlackClientSecret")" -SLACK_CLIENT_ID="$(prompt_default "SlackClientID (optional; blank uses template stage default)" "")" +echo "=== Slack App Credentials ===" +SLACK_SIGNING_SECRET_SOURCE="prompt" +[[ -n "${SLACK_SIGNING_SECRET:-}" ]] && SLACK_SIGNING_SECRET_SOURCE="env:SLACK_SIGNING_SECRET" +SLACK_CLIENT_SECRET_SOURCE="prompt" +[[ -n "${SLACK_CLIENT_SECRET:-}" ]] && SLACK_CLIENT_SECRET_SOURCE="env:SLACK_CLIENT_SECRET" +SLACK_SIGNING_SECRET="$(required_from_env_or_prompt "SLACK_SIGNING_SECRET" "SlackSigningSecret" "secret")" +SLACK_CLIENT_SECRET="$(required_from_env_or_prompt "SLACK_CLIENT_SECRET" "SlackClientSecret" "secret")" +SLACK_CLIENT_ID="$(required_from_env_or_prompt "SLACK_CLIENT_ID" "SlackClientID")" +ENV_EXISTING_DATABASE_HOST="${EXISTING_DATABASE_HOST:-}" +ENV_EXISTING_DATABASE_ADMIN_USER="${EXISTING_DATABASE_ADMIN_USER:-}" +ENV_EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" +EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" EXISTING_DATABASE_HOST="" EXISTING_DATABASE_ADMIN_USER="" EXISTING_DATABASE_ADMIN_PASSWORD="" EXISTING_DATABASE_NETWORK_MODE="public" EXISTING_DATABASE_SUBNET_IDS_CSV="" EXISTING_DATABASE_LAMBDA_SG_ID="" -DATABASE_USER="" -DATABASE_PASSWORD="" DATABASE_SCHEMA="" +DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" +if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_DATABASE_SCHEMA" ]]; then + DATABASE_SCHEMA_DEFAULT="$PREV_DATABASE_SCHEMA" +fi if [[ "$DB_MODE" == "2" ]]; then - EXISTING_DATABASE_HOST="$(prompt_default "ExistingDatabaseHost (RDS endpoint hostname)" "REPLACE_ME_RDS_HOST")" - EXISTING_DATABASE_ADMIN_USER="$(prompt_default "ExistingDatabaseAdminUser" "admin")" - EXISTING_DATABASE_ADMIN_PASSWORD="$(prompt_secret "ExistingDatabaseAdminPassword")" - DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "syncbot_${STAGE}")" + echo + echo "=== Existing Database Host ===" + 
EXISTING_DATABASE_HOST_DEFAULT="REPLACE_ME_RDS_HOST" + [[ -n "$PREV_EXISTING_DATABASE_HOST" ]] && EXISTING_DATABASE_HOST_DEFAULT="$PREV_EXISTING_DATABASE_HOST" + EXISTING_DATABASE_ADMIN_USER_DEFAULT="admin" + [[ -n "$PREV_EXISTING_DATABASE_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$PREV_EXISTING_DATABASE_ADMIN_USER" + + if [[ -n "$ENV_EXISTING_DATABASE_HOST" ]]; then + echo "Using ExistingDatabaseHost from environment variable EXISTING_DATABASE_HOST." + EXISTING_DATABASE_HOST="$ENV_EXISTING_DATABASE_HOST" + else + EXISTING_DATABASE_HOST="$(prompt_default "ExistingDatabaseHost (RDS endpoint hostname)" "$EXISTING_DATABASE_HOST_DEFAULT")" + fi + + DETECTED_ADMIN_USER="" + DETECTED_ADMIN_SECRET_ARN="" + if [[ "$IS_STACK_UPDATE" == "true" ]]; then + RDS_ADMIN_LOOKUP="$(rds_lookup_admin_defaults "$EXISTING_DATABASE_HOST" "$REGION")" + if [[ -n "$RDS_ADMIN_LOOKUP" && "$RDS_ADMIN_LOOKUP" != "None" ]]; then + IFS=$'\t' read -r DETECTED_ADMIN_USER DETECTED_ADMIN_SECRET_ARN <<< "$RDS_ADMIN_LOOKUP" + [[ "$DETECTED_ADMIN_USER" == "None" ]] && DETECTED_ADMIN_USER="" + [[ "$DETECTED_ADMIN_SECRET_ARN" == "None" ]] && DETECTED_ADMIN_SECRET_ARN="" + fi + fi + + if [[ -z "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" || "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" == "admin" ]]; then + [[ -n "$DETECTED_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$DETECTED_ADMIN_USER" + fi + if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_USER" ]]; then + echo "Using ExistingDatabaseAdminUser from environment variable EXISTING_DATABASE_ADMIN_USER." + EXISTING_DATABASE_ADMIN_USER="$ENV_EXISTING_DATABASE_ADMIN_USER" + else + EXISTING_DATABASE_ADMIN_USER="$(prompt_default "ExistingDatabaseAdminUser" "$EXISTING_DATABASE_ADMIN_USER_DEFAULT")" + fi + + if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Using ExistingDatabaseAdminPassword from environment variable EXISTING_DATABASE_ADMIN_PASSWORD." 
+ EXISTING_DATABASE_ADMIN_PASSWORD="$ENV_EXISTING_DATABASE_ADMIN_PASSWORD" + EXISTING_DB_ADMIN_PASSWORD_SOURCE="env:EXISTING_DATABASE_ADMIN_PASSWORD" + else + if [[ "$IS_STACK_UPDATE" == "true" && -n "$DETECTED_ADMIN_SECRET_ARN" ]]; then + EXISTING_DATABASE_ADMIN_PASSWORD="$(secret_password_by_id "$DETECTED_ADMIN_SECRET_ARN" "$REGION" 2>/dev/null || true)" + if [[ -n "$EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Detected existing DB admin password from AWS Secrets Manager for re-deploy." + EXISTING_DB_ADMIN_PASSWORD_SOURCE="aws-secret:$DETECTED_ADMIN_SECRET_ARN" + fi + fi + if [[ -z "$EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Existing DB admin credentials couldn't be auto-detected. Please enter them manually." + EXISTING_DATABASE_ADMIN_PASSWORD="$(prompt_secret_required "ExistingDatabaseAdminPassword")" + EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" + fi + fi + + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" + + if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseHost is required for existing DB mode." 
>&2 + exit 1 + fi RDS_LOOKUP="$(rds_lookup_network_defaults "$EXISTING_DATABASE_HOST" "$REGION")" DETECTED_PUBLIC="" @@ -270,6 +1287,9 @@ if [[ "$DB_MODE" == "2" ]]; then fi DEFAULT_EXISTING_DB_NETWORK_MODE="public" + if [[ -n "$PREV_EXISTING_DATABASE_NETWORK_MODE" ]]; then + DEFAULT_EXISTING_DB_NETWORK_MODE="$PREV_EXISTING_DATABASE_NETWORK_MODE" + fi if [[ "$DETECTED_PUBLIC" == "False" ]]; then DEFAULT_EXISTING_DB_NETWORK_MODE="private" fi @@ -280,14 +1300,25 @@ if [[ "$DB_MODE" == "2" ]]; then fi if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then - DEFAULT_SUBNETS="$DETECTED_SUBNETS" + AUTO_PRIVATE_SUBNETS="" + if [[ -n "$DETECTED_VPC" ]]; then + AUTO_PRIVATE_SUBNETS="$(discover_private_lambda_subnets_for_db_vpc "$DETECTED_VPC" "$REGION")" + if [[ -n "$AUTO_PRIVATE_SUBNETS" ]]; then + echo "Detected private Lambda subnet candidates (NAT-routed): $AUTO_PRIVATE_SUBNETS" + fi + fi + + DEFAULT_SUBNETS="$AUTO_PRIVATE_SUBNETS" + [[ -z "$DEFAULT_SUBNETS" && -n "$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" ]] && DEFAULT_SUBNETS="$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" + [[ -z "$DEFAULT_SUBNETS" ]] && DEFAULT_SUBNETS="$DETECTED_SUBNETS" [[ -z "$DEFAULT_SUBNETS" ]] && DEFAULT_SUBNETS="REPLACE_ME_SUBNET_1,REPLACE_ME_SUBNET_2" DEFAULT_SG="${DETECTED_SGS%%,*}" + [[ -n "$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" ]] && DEFAULT_SG="$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" [[ -z "$DEFAULT_SG" ]] && DEFAULT_SG="REPLACE_ME_LAMBDA_SG_ID" echo echo "Private DB mode selected: Lambdas will run in VPC." - echo "Note: app Lambda needs internet egress (usually NAT) to call Slack APIs." + echo "Note: app Lambda needs Internet egress (usually NAT) to call Slack APIs." 
EXISTING_DATABASE_SUBNET_IDS_CSV="$(prompt_default "ExistingDatabaseSubnetIdsCsv (comma-separated)" "$DEFAULT_SUBNETS")" EXISTING_DATABASE_LAMBDA_SG_ID="$(prompt_default "ExistingDatabaseLambdaSecurityGroupId" "$DEFAULT_SG")" @@ -299,31 +1330,59 @@ if [[ "$DB_MODE" == "2" ]]; then echo "Error: valid ExistingDatabaseLambdaSecurityGroupId is required for private mode." >&2 exit 1 fi + + echo + echo "Running private-connectivity preflight checks..." + if ! validate_private_existing_db_connectivity \ + "$REGION" \ + "$DATABASE_ENGINE" \ + "$EXISTING_DATABASE_SUBNET_IDS_CSV" \ + "$EXISTING_DATABASE_LAMBDA_SG_ID" \ + "$DETECTED_VPC" \ + "$DETECTED_SGS" \ + "$EXISTING_DATABASE_HOST"; then + echo "Fix network settings and rerun deploy." >&2 + exit 1 + fi fi else - DATABASE_USER="$(prompt_default "DatabaseUser (new RDS master username)" "syncbot_admin")" - DATABASE_PASSWORD="$(prompt_secret "DatabasePassword (new RDS master password)")" - DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "syncbot_${STAGE}")" + echo + echo "=== New RDS Database ===" + echo "New RDS mode uses:" + echo " - admin user: syncbot_admin_${STAGE} (password auto-generated)" + echo " - app user: syncbot_user_${STAGE} (password auto-generated)" + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" fi -TOKEN_OVERRIDE="$(prompt_default "TokenEncryptionKeyOverride (optional DR key; leave blank for normal deploy)" "")" +TOKEN_OVERRIDE="$(prompt_default "TokenEncryptionKeyOverride (optional for disaster recovery; leave blank for normal deploy)" "")" EXISTING_TOKEN_SECRET_ARN="" TOKEN_SECRET_NAME="syncbot-${STAGE}-token-encryption-key" +APP_DB_SECRET_NAME="syncbot-${STAGE}-app-db-password" if [[ -z "$TOKEN_OVERRIDE" ]]; then DETECTED_TOKEN_SECRET_ARN="$(secret_arn_by_name "$TOKEN_SECRET_NAME" "$REGION")" if [[ -n "$DETECTED_TOKEN_SECRET_ARN" && "$DETECTED_TOKEN_SECRET_ARN" != "None" ]]; then echo "Detected existing token secret: $TOKEN_SECRET_NAME" - if prompt_yes_no "Reuse this 
existing token secret ARN to avoid name-collision failures?" "y"; then + if prompt_yes_no "Reuse detected secret ARN for this deploy?" "y"; then EXISTING_TOKEN_SECRET_ARN="$DETECTED_TOKEN_SECRET_ARN" fi fi fi +LOG_LEVEL_DEFAULT="INFO" +if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_LOG_LEVEL" ]]; then + LOG_LEVEL_DEFAULT="$PREV_LOG_LEVEL" +fi + +echo +echo "=== Log Level ===" +LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" + echo echo "=== Deploy Summary ===" echo "Region: $REGION" echo "Stack: $STACK_NAME" echo "Stage: $STAGE" +echo "Log level: $LOG_LEVEL" echo "Deploy bucket: $S3_BUCKET" if [[ "$DB_MODE" == "2" ]]; then echo "DB mode: existing host" @@ -338,7 +1397,8 @@ if [[ "$DB_MODE" == "2" ]]; then else echo "DB mode: create new RDS" echo "DB engine: $DATABASE_ENGINE" - echo "DB user: $DATABASE_USER" + echo "DB admin user: syncbot_admin_${STAGE} (auto password)" + echo "DB app user: syncbot_user_${STAGE} (auto password)" echo "DB schema: $DATABASE_SCHEMA" fi if [[ -n "$TOKEN_OVERRIDE" ]]; then @@ -349,6 +1409,9 @@ else echo "Token secret: Reusing existing secret ARN" fi fi +if [[ -n "$APP_DB_PASSWORD_OVERRIDE" ]]; then + echo "App DB secret: Reusing prior app DB password value" +fi echo if ! prompt_yes_no "Proceed with build + deploy?" "y"; then @@ -356,9 +1419,12 @@ if ! prompt_yes_no "Proceed with build + deploy?" "y"; then exit 0 fi +handle_orphan_app_db_secret_on_create "$EXISTING_STACK_STATUS" "$APP_DB_SECRET_NAME" "$REGION" + handle_unhealthy_stack_state "$STACK_NAME" "$REGION" echo +echo "=== SAM Build ===" echo "Building app..." sam build -t "$APP_TEMPLATE" --use-container @@ -368,6 +1434,7 @@ PARAMS=( "SlackSigningSecret=$SLACK_SIGNING_SECRET" "SlackClientSecret=$SLACK_CLIENT_SECRET" "DatabaseSchema=$DATABASE_SCHEMA" + "LogLevel=$LOG_LEVEL" ) if [[ -n "$SLACK_CLIENT_ID" ]]; then @@ -388,19 +1455,28 @@ if [[ "$DB_MODE" == "2" ]]; then ) fi else + # Explicitly clear existing-host parameters on updates to avoid stale previous values. 
PARAMS+=( - "DatabaseUser=$DATABASE_USER" - "DatabasePassword=$DATABASE_PASSWORD" + "ExistingDatabaseHost=" + "ExistingDatabaseAdminUser=" + "ExistingDatabaseAdminPassword=" + "ExistingDatabaseNetworkMode=public" + "ExistingDatabaseSubnetIdsCsv=" + "ExistingDatabaseLambdaSecurityGroupId=" ) fi if [[ -n "$TOKEN_OVERRIDE" ]]; then PARAMS+=("TokenEncryptionKeyOverride=$TOKEN_OVERRIDE") fi +if [[ -n "$APP_DB_PASSWORD_OVERRIDE" ]]; then + PARAMS+=("AppDbPasswordOverride=$APP_DB_PASSWORD_OVERRIDE") +fi if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then PARAMS+=("ExistingTokenEncryptionKeySecretArn=$EXISTING_TOKEN_SECRET_ARN") fi +echo "=== SAM Deploy ===" echo "Deploying stack..." sam deploy \ -t .aws-sam/build/template.yaml \ @@ -411,9 +1487,81 @@ sam deploy \ --no-fail-on-empty-changeset \ --parameter-overrides "${PARAMS[@]}" +APP_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" +SYNCBOT_API_URL="$(output_value "$APP_OUTPUTS" "SyncBotApiUrl")" +SYNCBOT_INSTALL_URL="$(output_value "$APP_OUTPUTS" "SyncBotInstallUrl")" + echo echo "Deploy complete." -echo "IMPORTANT: back up TOKEN_ENCRYPTION_KEY from Secrets Manager." -echo "Expected secret name: syncbot-${STAGE}-token-encryption-key" -echo "Example read command:" -echo " aws secretsmanager get-secret-value --secret-id syncbot-${STAGE}-token-encryption-key --query SecretString --output text --region $REGION" +generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" +if [[ -n "$SLACK_MANIFEST_GENERATED_PATH" ]]; then + if prompt_yes_no "Configure Slack app via Slack API now (create or update from generated manifest)?" "n"; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" + fi +fi +echo +echo "=== Backup Secrets Summary ===" +if [[ -n "$TOKEN_OVERRIDE" ]]; then + RECEIPT_TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + RECEIPT_TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" + echo "- TOKEN_ENCRYPTION_KEY: supplied via TokenEncryptionKeyOverride during deploy." 
+ echo " Ensure this key is backed up where you store DR secrets." +else + # Display path: name or ARN for console; RECEIPT_* copies the same id/value into write_deploy_receipt. + TOKEN_SECRET_ID="$TOKEN_SECRET_NAME" + if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then + TOKEN_SECRET_ID="$EXISTING_TOKEN_SECRET_ARN" + fi + TOKEN_SECRET_VALUE="$(secret_value_by_id "$TOKEN_SECRET_ID" "$REGION")" + RECEIPT_TOKEN_SECRET_ID="$TOKEN_SECRET_ID" + if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then + RECEIPT_TOKEN_SECRET_VALUE="$TOKEN_SECRET_VALUE" + fi + echo "- TOKEN_ENCRYPTION_KEY secret: $TOKEN_SECRET_ID" + if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then + echo " TOKEN_ENCRYPTION_KEY value:" + echo " $TOKEN_SECRET_VALUE" + else + echo " (Could not read secret value automatically. Check IAM permissions.)" + fi +fi + +APP_DB_SECRET_VALUE="$(secret_value_by_id "$APP_DB_SECRET_NAME" "$REGION")" +# RECEIPT_APP_DB_* mirror console output for the markdown receipt from write_deploy_receipt. +RECEIPT_APP_DB_SECRET_NAME="$APP_DB_SECRET_NAME" +if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then + RECEIPT_APP_DB_SECRET_VALUE="$APP_DB_SECRET_VALUE" +fi +echo "- App DB password secret: $APP_DB_SECRET_NAME" +if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then + echo " App DB password value:" + echo " $APP_DB_SECRET_VALUE" +else + echo " (Could not read secret value automatically. Check IAM permissions.)" +fi + +if prompt_yes_no "Set up GitHub Actions configuration now?" 
"n"; then + configure_github_actions_aws \ + "$BOOTSTRAP_OUTPUTS" \ + "$REGION" \ + "$STACK_NAME" \ + "$STAGE" \ + "$DATABASE_SCHEMA" \ + "$DB_MODE" \ + "$EXISTING_DATABASE_HOST" \ + "$EXISTING_DATABASE_ADMIN_USER" \ + "$EXISTING_DATABASE_ADMIN_PASSWORD" \ + "$EXISTING_DATABASE_NETWORK_MODE" \ + "$EXISTING_DATABASE_SUBNET_IDS_CSV" \ + "$EXISTING_DATABASE_LAMBDA_SG_ID" \ + "$DATABASE_ENGINE" +fi + +write_deploy_receipt \ + "aws" \ + "$STAGE" \ + "$STACK_NAME" \ + "$REGION" \ + "$SYNCBOT_API_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh index 78196b7..8b378a4 100755 --- a/infra/aws/scripts/print-bootstrap-outputs.sh +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -2,12 +2,15 @@ # Print SyncBot AWS bootstrap stack outputs for GitHub variables or local config. # Run from repo root: infra/aws/scripts/print-bootstrap-outputs.sh # Optional env: BOOTSTRAP_STACK_NAME (default syncbot-bootstrap), AWS_REGION (default us-east-2). +# +# Flow: describe-stack (key/value) -> raw lines -> suggested GitHub variable names. 
set -euo pipefail STACK_NAME="${BOOTSTRAP_STACK_NAME:-syncbot-bootstrap}" REGION="${AWS_REGION:-us-east-2}" +echo "=== Bootstrap Stack Outputs ===" echo "Bootstrap stack: $STACK_NAME (region: $REGION)" echo "" @@ -25,7 +28,7 @@ while read -r key value; do done <<< "$outputs" echo "" -echo "--- GitHub Actions variables (set these per environment) ---" +echo "=== Suggested GitHub Actions Variables ===" echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRoleArn"{print $2}')" echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}')" echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml index 7345bd2..8008668 100644 --- a/infra/aws/template.bootstrap.yaml +++ b/infra/aws/template.bootstrap.yaml @@ -26,7 +26,9 @@ Parameters: DeploymentBucketPrefix: Type: String Default: "syncbot-deploy" - Description: Prefix for the deployment artifact bucket name (account + region will be appended). + Description: > + Prefix for the deployment artifact bucket. The bucket resource name is + DeploymentBucketPrefix + account ID + region (see DeploymentBucket in Resources). Conditions: CreateOIDC: !Equals [!Ref CreateOIDCProvider, "true"] diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 01a0d05..73df658 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -2,10 +2,10 @@ AWSTemplateFormatVersion: "2010-09-09" Transform: AWS::Serverless-2016-10-31 Description: > SyncBot - Slack app that syncs posts and replies across workspaces. - Free-tier compatible: Lambda, API Gateway, RDS PostgreSQL or MySQL (db.t3.micro). + Free-tier compatible: Lambda, API Gateway, RDS PostgreSQL or MySQL (db.t4g.micro). OAuth and app data use RDS; media is uploaded directly to Slack. SAM deploy uses an S3 artifact bucket for packaging only (not runtime). 
- Template lives under infra/aws; CodeUri points at repo-root syncbot/. + Template lives under infra/aws; CodeUri points at repo-root syncbot/ (includes db/alembic for migrations). Globals: Function: @@ -23,6 +23,9 @@ Globals: # ================================================================ # Parameters # ================================================================ +# Grouping: Stage/engine → Slack (maps to SLACK_* env) → RDS / VPC → +# secrets/overrides (TOKEN_ENCRYPTION_KEY, DB password) → RequireAdmin. +# See each Description for the runtime env name where applicable. Parameters: Stage: @@ -35,10 +38,10 @@ Parameters: DatabaseEngine: Description: > - SQL engine for new or existing RDS host. Use postgresql for Aurora DSQL or RDS PostgreSQL; - use mysql for legacy RDS MySQL. + SQL engine for new or existing RDS database host. + Supported engines: mysql or postgresql. Default is mysql. Type: String - Default: postgresql + Default: mysql AllowedValues: - postgresql - mysql @@ -46,28 +49,32 @@ Parameters: # --- Slack --- SlackSigningSecret: - Description: Slack signing secret for request verification + Description: Slack signing secret for request verification (SLACK_SIGNING_SECRET) Type: String NoEcho: true - Default: "123" SlackClientID: Description: > - Slack OAuth app Client ID (Basic Information → App Credentials). + Slack OAuth app Client ID (SLACK_CLIENT_ID; Basic Information → App Credentials). Required for your Slack app; use the ID from the app you created for this deploy. 
Type: String - Default: "" SlackClientSecret: - Description: Slack OAuth client secret + Description: Slack OAuth client secret (SLACK_CLIENT_SECRET) Type: String NoEcho: true - Default: "123" - SlackOauthScopes: - Description: Comma-separated list of Slack OAuth scopes + SlackOauthBotScopes: + Description: Comma-separated list of Slack OAuth bot scopes (SLACK_BOT_SCOPES) + Type: String + Default: "app_mentions:read,channels:history,channels:join,channels:read,channels:manage,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" + + SlackOauthUserScopes: + Description: > + Comma-separated Slack OAuth user scopes (SLACK_USER_SCOPES). Must match slack-manifest.json + oauth_config.scopes.user and syncbot/slack_manifest_scopes.py USER_SCOPES (same order). Type: String - Default: "app_mentions:read,channels:history,channels:join,chat:write,chat:write:user,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write" + Default: "chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" # --- Database (RDS) --- @@ -81,7 +88,7 @@ Parameters: ExistingDatabaseAdminUser: Description: > - MySQL user that can create databases and users (e.g. master). Used only when + Database admin user that can create databases and users (e.g. RDS master). Used only when ExistingDatabaseHost is set; the deploy creates a dedicated app user and schema. Type: String Default: "" @@ -95,7 +102,7 @@ Parameters: ExistingDatabaseNetworkMode: Description: > Network mode for existing database host. Use "public" when the existing RDS - endpoint is reachable from the public internet. Use "private" when it is only + endpoint is reachable from the public Internet. 
Use "private" when it is only reachable from within a VPC. Type: String Default: public @@ -119,38 +126,31 @@ Parameters: Type: String Default: "" - DatabaseUser: - Description: > - Database username for new RDS only. When using an existing database - (ExistingDatabaseHost set), leave BLANK — the deploy creates syncbot_ and uses it. - Type: String - Default: "" - - DatabasePassword: - Description: > - Master password for new RDS only (min 8 chars). When using an existing database - (ExistingDatabaseHost set), leave BLANK — the app user password is auto-generated. - Type: String - NoEcho: true - Default: "" - DatabaseSchema: Description: > - MySQL database/schema name. Each app sharing an RDS instance + Database/schema name for MySQL or PostgreSQL. Each app sharing an RDS instance should use a different schema name. Type: String Default: "syncbot" DatabaseInstanceClass: - Description: "RDS instance class (db.t3.micro is free-tier eligible). Ignored when using an existing database." + Description: "RDS instance class (db.t4g.micro is free-tier eligible). Ignored when using an existing database." Type: String - Default: db.t3.micro + Default: db.t4g.micro AllowedValues: - - db.t3.micro - - db.t3.small - - db.t3.medium - db.t4g.micro - - db.t4g.small + + DatabaseBackupRetentionDays: + Description: > + Automated backup retention days for new RDS. Default 0 is the most + free-tier-friendly setting and avoids limits on free-plan accounts. + Increase only if your account plan supports automated backups. + Type: Number + Default: 0 + AllowedValues: + - 0 + - 1 + - 7 AllowedDBCidr: Description: > @@ -164,6 +164,8 @@ Parameters: Type: String Default: "10.0.0.0/16" + # --- Secrets and optional overrides (disaster recovery / recreate) --- + TokenEncryptionKeyOverride: Description: > Optional disaster-recovery override for TOKEN_ENCRYPTION_KEY. 
@@ -181,6 +183,14 @@ Parameters: Type: String Default: "" + AppDbPasswordOverride: + Description: > + Optional app DB password override used only when recreating the app DB secret. + Leave empty for normal deploys to auto-generate. + Type: String + NoEcho: true + Default: "" + RequireAdmin: Description: > When "true" (default), only workspace admins and owners can @@ -191,6 +201,18 @@ Parameters: - "true" - "false" + LogLevel: + Description: > + Python logging level for the app (LOG_LEVEL). DEBUG, INFO, WARNING, ERROR, or CRITICAL. + Type: String + Default: INFO + AllowedValues: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + # ================================================================ # Conditions # ================================================================ @@ -202,23 +224,22 @@ Conditions: IsPostgresqlEngine: !Equals [!Ref DatabaseEngine, postgresql] CreateDatabaseMysql: !And [!Condition CreateDatabase, !Condition IsMysqlEngine] CreateDatabasePostgresql: !And [!Condition CreateDatabase, !Condition IsPostgresqlEngine] + UseAutomatedBackups: !Not [!Equals [!Ref DatabaseBackupRetentionDays, 0]] UseExistingDatabasePrivateVpc: !And - !Condition UseExistingDatabase - !Equals [!Ref ExistingDatabaseNetworkMode, "private"] HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] HasExistingTokenEncryptionKeySecretArn: !Not [!Equals [!Ref ExistingTokenEncryptionKeySecretArn, ""]] + HasAppDbPasswordOverride: !Not [!Equals [!Ref AppDbPasswordOverride, ""]] + HasNoAppDbPasswordOverride: !Not [!Condition HasAppDbPasswordOverride] CreateTokenEncryptionKeySecret: !And - !Not [!Condition HasTokenEncryptionKeyOverride] - !Not [!Condition HasExistingTokenEncryptionKeySecretArn] - HasSlackClientID: !Not [!Equals [!Ref SlackClientID, ""]] - Mappings: StagesMap: test: - SlackClientID: "1966318390773.6037875913205" KeepWarmName: "SyncBotKeepWarmTest" prod: - SlackClientID: "1990266264068.6053437451057" KeepWarmName: "SyncBotKeepWarmProd" 
Resources: @@ -376,9 +397,10 @@ Resources: DBInstanceIdentifier: !Sub "syncbot-${Stage}-mysql" DBInstanceClass: !Ref DatabaseInstanceClass Engine: mysql - EngineVersion: "8.0" - MasterUsername: !Ref DatabaseUser - MasterUserPassword: !Ref DatabasePassword + # Minor version must match cfn-lint / RDS allowed list (major-only "8.0" fails E3691) + EngineVersion: "8.0.40" + MasterUsername: !Sub "syncbot_admin_${Stage}" + ManageMasterUserPassword: true DBName: !Ref DatabaseSchema AllocatedStorage: 20 StorageType: gp2 @@ -389,8 +411,11 @@ Resources: DBParameterGroupName: !Ref RDSParameterGroupMysql VPCSecurityGroups: - !Ref RDSSecurityGroup - BackupRetentionPeriod: 7 - PreferredBackupWindow: "03:00-04:00" + BackupRetentionPeriod: !Ref DatabaseBackupRetentionDays + PreferredBackupWindow: !If + - UseAutomatedBackups + - "03:00-04:00" + - !Ref AWS::NoValue PreferredMaintenanceWindow: "sun:04:00-sun:05:00" DeletionProtection: true Tags: @@ -407,8 +432,8 @@ Resources: DBInstanceClass: !Ref DatabaseInstanceClass Engine: postgres EngineVersion: "16.6" - MasterUsername: !Ref DatabaseUser - MasterUserPassword: !Ref DatabasePassword + MasterUsername: !Sub "syncbot_admin_${Stage}" + ManageMasterUserPassword: true DBName: !Ref DatabaseSchema AllocatedStorage: 20 StorageType: gp2 @@ -419,8 +444,11 @@ Resources: DBParameterGroupName: !Ref RDSParameterGroupPostgres VPCSecurityGroups: - !Ref RDSSecurityGroup - BackupRetentionPeriod: 7 - PreferredBackupWindow: "03:00-04:00" + BackupRetentionPeriod: !Ref DatabaseBackupRetentionDays + PreferredBackupWindow: !If + - UseAutomatedBackups + - "03:00-04:00" + - !Ref AWS::NoValue PreferredMaintenanceWindow: "sun:04:00-sun:05:00" DeletionProtection: true Tags: @@ -440,10 +468,10 @@ Resources: ExcludePunctuation: true IncludeSpace: false - # --- Existing RDS: generated app password and setup Lambda --- - AppDbCredentialsSecret: + # --- DB setup: generated app password and setup Lambda --- + AppDbCredentialsSecretGenerated: Type: 
AWS::SecretsManager::Secret - Condition: UseExistingDatabase + Condition: HasNoAppDbPasswordOverride Properties: Name: !Sub "syncbot-${Stage}-app-db-password" Description: !Sub "SyncBot ${Stage} app DB user password (created by stack)" @@ -452,11 +480,23 @@ Resources: ExcludePunctuation: true IncludeSpace: false + AppDbCredentialsSecretProvided: + Type: AWS::SecretsManager::Secret + Condition: HasAppDbPasswordOverride + Properties: + Name: !Sub "syncbot-${Stage}-app-db-password" + Description: !Sub "SyncBot ${Stage} app DB user password (provided override)" + SecretString: !Ref AppDbPasswordOverride + DbSetupFunction: Type: AWS::Serverless::Function - Condition: UseExistingDatabase Metadata: BuildMethod: makefile + cfn-lint: + config: + ignore_checks: + # Parameter ExistingDatabaseLambdaSecurityGroupId is a valid sg-* at deploy time + - W1030 Properties: CodeUri: db_setup/ Handler: handler.handler @@ -471,7 +511,18 @@ Resources: Statement: - Effect: Allow Action: secretsmanager:GetSecretValue - Resource: !Ref AppDbCredentialsSecret + Resource: + - !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated + - !If + - UseExistingDatabase + - !Ref AWS::NoValue + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.MasterUserSecret.SecretArn + - !GetAtt RDSInstancePostgres.MasterUserSecret.SecretArn VpcConfig: !If - UseExistingDatabasePrivateVpc - SubnetIds: !Split [",", !Ref ExistingDatabaseSubnetIdsCsv] @@ -481,16 +532,36 @@ Resources: AppDbSetup: Type: Custom::ExistingRDSSetup - Condition: UseExistingDatabase - DependsOn: DbSetupFunction Properties: ServiceToken: !GetAtt DbSetupFunction.Arn - Host: !Ref ExistingDatabaseHost - AdminUser: !Ref ExistingDatabaseAdminUser - AdminPassword: !Ref ExistingDatabaseAdminPassword + Host: !If + - UseExistingDatabase + - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address + 
AdminUser: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAdminUser + - !Sub "syncbot_admin_${Stage}" + AdminPassword: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAdminPassword + - "" + AdminSecretArn: !If + - UseExistingDatabase + - "" + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.MasterUserSecret.SecretArn + - !GetAtt RDSInstancePostgres.MasterUserSecret.SecretArn Schema: !Ref DatabaseSchema Stage: !Ref Stage - SecretArn: !Ref AppDbCredentialsSecret + SecretArn: !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated DatabaseEngine: !Ref DatabaseEngine # ============================================================ @@ -499,6 +570,11 @@ Resources: SyncBotFunction: Type: AWS::Serverless::Function + Metadata: + cfn-lint: + config: + ignore_checks: + - W1030 Properties: CodeUri: ../../syncbot/ Handler: app.handler @@ -543,15 +619,10 @@ Resources: Variables: SLACK_BOT_TOKEN: "123" SLACK_SIGNING_SECRET: !Ref SlackSigningSecret - ENV_SLACK_CLIENT_SECRET: !Ref SlackClientSecret - ENV_SLACK_SCOPES: !Ref SlackOauthScopes - ENV_SLACK_CLIENT_ID: !If - - HasSlackClientID - - !Ref SlackClientID - - !FindInMap - - StagesMap - - !Ref Stage - - SlackClientID + SLACK_CLIENT_SECRET: !Ref SlackClientSecret + SLACK_BOT_SCOPES: !Ref SlackOauthBotScopes + SLACK_USER_SCOPES: !Ref SlackOauthUserScopes + SLACK_CLIENT_ID: !Ref SlackClientID DATABASE_BACKEND: !Ref DatabaseEngine DATABASE_PORT: !If - IsMysqlEngine @@ -564,16 +635,13 @@ Resources: - IsMysqlEngine - !GetAtt RDSInstanceMysql.Endpoint.Address - !GetAtt RDSInstancePostgres.Endpoint.Address - DATABASE_USER: !If - - UseExistingDatabase - - !GetAtt AppDbSetup.Username - - !Ref DatabaseUser - DATABASE_PASSWORD: !If - - UseExistingDatabase - - !Sub - - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" - - { SecretArn: !Ref AppDbCredentialsSecret } - - !Ref DatabasePassword + DATABASE_USER: !GetAtt AppDbSetup.Username + DATABASE_PASSWORD: !Sub + - 
"{{resolve:secretsmanager:${SecretArn}:SecretString}}" + - SecretArn: !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated DATABASE_SCHEMA: !Ref DatabaseSchema TOKEN_ENCRYPTION_KEY: !If - HasTokenEncryptionKeyOverride @@ -585,6 +653,24 @@ Resources: - { SecretArn: !Ref ExistingTokenEncryptionKeySecretArn } - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" REQUIRE_ADMIN: !Ref RequireAdmin + LOG_LEVEL: !Ref LogLevel + + # Slack Bolt (aws_lambda adapter) runs lazy listeners by invoking this function again + # via lambda:InvokeFunction. The execution role must allow self-invoke. + SyncBotFunctionSelfInvokePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: !Sub "syncbot-${Stage}-self-invoke" + Roles: + - !Ref SyncBotFunctionRole + PolicyDocument: + Version: "2012-10-17" + Statement: + - Sid: AllowSelfInvokeForBoltLazyListeners + Effect: Allow + Action: + - lambda:InvokeFunction + Resource: !GetAtt SyncBotFunction.Arn # ============================================================ # CloudWatch Alarms @@ -713,8 +799,11 @@ Outputs: Value: !Ref VPC TokenEncryptionSecretArn: - Description: Secrets Manager ARN containing TOKEN_ENCRYPTION_KEY + Description: Secrets Manager ARN containing TOKEN_ENCRYPTION_KEY (empty when using TokenEncryptionKeyOverride only) Value: !If - HasExistingTokenEncryptionKeySecretArn - !Ref ExistingTokenEncryptionKeySecretArn - - !Ref TokenEncryptionKeySecret + - !If + - CreateTokenEncryptionKeySecret + - !Ref TokenEncryptionKeySecret + - "" diff --git a/infra/aws/tests/test_sam_template_validate.py b/infra/aws/tests/test_sam_template_validate.py new file mode 100644 index 0000000..e78c224 --- /dev/null +++ b/infra/aws/tests/test_sam_template_validate.py @@ -0,0 +1,37 @@ +"""Structural SAM validation for templates next to this package (``sam validate``). + +Requires the AWS SAM CLI on PATH; skipped when missing. 
+""" + +from __future__ import annotations + +import shutil +import subprocess +from pathlib import Path + +import pytest + +INFRA_AWS = Path(__file__).resolve().parent.parent + + +def _which(name: str) -> str | None: + return shutil.which(name) + + +@pytest.mark.parametrize( + "name", + ["template.yaml", "template.bootstrap.yaml"], +) +def test_sam_template_validates(name: str) -> None: + """Same class of checks as ``sam build``, without packaging.""" + sam = _which("sam") + if not sam: + pytest.skip("sam CLI not on PATH") + template = INFRA_AWS / name + assert template.is_file(), f"missing {template}" + proc = subprocess.run( + [sam, "validate", "-t", str(template), "--lint"], + capture_output=True, + text=True, + ) + assert proc.returncode == 0, f"sam validate failed:\n{proc.stdout}\n{proc.stderr}" diff --git a/infra/gcp/README.md b/infra/gcp/README.md index 039eb70..8117eac 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -25,7 +25,7 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infras ```bash echo -n "YOUR_SLACK_SIGNING_SECRET" | gcloud secrets versions add syncbot-test-syncbot-slack-signing-secret --data-file=- - # Repeat for ENV_SLACK_CLIENT_ID, ENV_SLACK_CLIENT_SECRET, ENV_SLACK_SCOPES, syncbot-db-password (if existing DB) + # Repeat for SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, SLACK_BOT_SCOPES (comma-separated list must match oauth_config.scopes.bot / BOT_SCOPES), syncbot-db-password (if existing DB) ``` `TOKEN_ENCRYPTION_KEY` is generated once automatically by Terraform and stored in Secret Manager. Back it up. If lost, existing workspaces must reinstall to re-authorize bot tokens. @@ -49,6 +49,9 @@ Minimal Terraform scaffold to run SyncBot on Google Cloud. 
Satisfies the [infras | `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL | | `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) | | `cloud_run_image` | Container image URL for Cloud Run (set after first build) | +| `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). | +| `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. | +| `log_level` | Python logging level for the app (`LOG_LEVEL`): `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL` (default `INFO`). | | `enable_keep_warm` | Create Cloud Scheduler job to ping the service (default `true`) | See [variables.tf](variables.tf) for all options. 
diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 19c1010..963a92d 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -26,19 +26,27 @@ locals { var.secret_slack_signing_secret, var.secret_slack_client_id, var.secret_slack_client_secret, - var.secret_slack_scopes, + var.secret_slack_bot_scopes, var.secret_token_encryption_key, var.secret_db_password, ] # Map deploy-contract env var names to Secret Manager secret variable keys (used in app_secrets) env_to_secret_key = { - "SLACK_SIGNING_SECRET" = var.secret_slack_signing_secret - "ENV_SLACK_CLIENT_ID" = var.secret_slack_client_id - "ENV_SLACK_CLIENT_SECRET" = var.secret_slack_client_secret - "ENV_SLACK_SCOPES" = var.secret_slack_scopes - "TOKEN_ENCRYPTION_KEY" = var.secret_token_encryption_key - "DATABASE_PASSWORD" = var.secret_db_password + "SLACK_SIGNING_SECRET" = var.secret_slack_signing_secret + "SLACK_CLIENT_ID" = var.secret_slack_client_id + "SLACK_CLIENT_SECRET" = var.secret_slack_client_secret + "SLACK_BOT_SCOPES" = var.secret_slack_bot_scopes + "TOKEN_ENCRYPTION_KEY" = var.secret_token_encryption_key + "DATABASE_PASSWORD" = var.secret_db_password } + # Runtime DB connection: existing host or Cloud SQL public IP after create + db_host = var.use_existing_database ? var.existing_db_host : ( + length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "" + ) + db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" + db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" + # Image: variable or placeholder until first image push + cloud_run_image_effective = var.cloud_run_image != "" ? 
var.cloud_run_image : "us-docker.pkg.dev/cloudrun/container/hello" } # --------------------------------------------------------------------------- @@ -227,14 +235,6 @@ resource "google_secret_manager_secret_version" "token_encryption_key" { # Cloud Run service # --------------------------------------------------------------------------- -locals { - db_host = var.use_existing_database ? var.existing_db_host : (length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "") - db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" - db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" - # Image: use variable or a placeholder until first deploy - image = var.cloud_run_image != "" ? var.cloud_run_image : "us-docker.pkg.dev/cloudrun/container/hello" -} - resource "google_cloud_run_v2_service" "syncbot" { project = var.project_id name = local.name_prefix @@ -250,7 +250,7 @@ resource "google_cloud_run_v2_service" "syncbot" { } containers { - image = local.image + image = local.cloud_run_image_effective resources { limits = { @@ -271,6 +271,15 @@ resource "google_cloud_run_v2_service" "syncbot" { name = "DATABASE_SCHEMA" value = local.db_schema } + # Runtime user OAuth scopes — must match slack-manifest.json and USER_SCOPES in slack_manifest_scopes.py + env { + name = "SLACK_USER_SCOPES" + value = var.slack_user_scopes + } + env { + name = "LOG_LEVEL" + value = var.log_level + } dynamic "env" { for_each = local.env_to_secret_key @@ -295,11 +304,11 @@ resource "google_cloud_run_v2_service" "syncbot" { # Allow unauthenticated invocations (Slack calls the URL; use IAP or Cloud Armor in prod if needed) resource "google_cloud_run_v2_service_iam_member" "public" { - project = google_cloud_run_v2_service.syncbot.project - location = google_cloud_run_v2_service.syncbot.location - name = google_cloud_run_v2_service.syncbot.name - role = "roles/run.invoker" - member = "allUsers" + project = 
google_cloud_run_v2_service.syncbot.project + location = google_cloud_run_v2_service.syncbot.location + name = google_cloud_run_v2_service.syncbot.name + role = "roles/run.invoker" + member = "allUsers" } # --------------------------------------------------------------------------- @@ -307,12 +316,12 @@ resource "google_cloud_run_v2_service_iam_member" "public" { # --------------------------------------------------------------------------- resource "google_cloud_scheduler_job" "keep_warm" { - count = var.enable_keep_warm ? 1 : 0 - project = var.project_id - name = "${local.name_prefix}-keep-warm" - region = var.region - schedule = "*/${var.keep_warm_interval_minutes} * * * *" - time_zone = "UTC" + count = var.enable_keep_warm ? 1 : 0 + project = var.project_id + name = "${local.name_prefix}-keep-warm" + region = var.region + schedule = "*/${var.keep_warm_interval_minutes} * * * *" + time_zone = "UTC" attempt_deadline = "60s" http_target { diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh new file mode 100755 index 0000000..775afb5 --- /dev/null +++ b/infra/gcp/scripts/deploy.sh @@ -0,0 +1,619 @@ +#!/usr/bin/env bash +# Interactive GCP deploy helper (Terraform). Run from repo root: +# ./infra/gcp/scripts/deploy.sh +# Or via: ./deploy.sh gcp +# +# Phases (main path): +# 1) Prerequisites (terraform, gcloud, python3, curl) +# 2) Project, region, stage; detect existing Cloud Run service +# 3) Database source: USE_EXISTING true = external DB only (skip Cloud SQL); false = Terraform-managed DB path +# 4) Container image var for Cloud Run +# 5) terraform init / plan / apply +# 6) Stage Slack manifest, optional Slack API configure +# 7) Deploy receipt, print-bootstrap-outputs, optional GitHub Actions vars +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GCP_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" +SLACK_MANIFEST_GENERATED_PATH="" + +# shellcheck source=/dev/null +source "$REPO_ROOT/deploy.sh" + +prereqs_require_cmd terraform prereqs_hint_terraform +prereqs_require_cmd gcloud prereqs_hint_gcloud +prereqs_require_cmd python3 prereqs_hint_python3 +prereqs_require_cmd curl prereqs_hint_curl + +prereqs_print_cli_status_matrix "GCP" terraform gcloud python3 curl + +prompt_line() { + local p="$1" + local d="${2:-}" + local v + if [[ -n "$d" ]]; then + read -r -p "$p [$d]: " v + echo "${v:-$d}" + else + read -r -p "$p: " v + echo "$v" + fi +} + +prompt_secret() { + local p="$1" + local v + read -r -s -p "$p: " v + printf '\n' >&2 + echo "$v" +} + +prompt_required() { + local p="$1" + local v + while true; do + read -r -p "$p: " v + if [[ -n "$v" ]]; then + echo "$v" + return 0 + fi + echo "Error: $p is required." >&2 + done +} + +required_from_env_or_prompt() { + local env_name="$1" + local prompt="$2" + local mode="${3:-plain}" # plain|secret + local env_value="${!env_name:-}" + if [[ -n "$env_value" ]]; then + echo "Using $prompt from environment variable $env_name." >&2 + echo "$env_value" + return 0 + fi + if [[ "$mode" == "secret" ]]; then + while true; do + env_value="$(prompt_secret "$prompt")" + if [[ -n "$env_value" ]]; then + echo "$env_value" + return 0 + fi + echo "Error: $prompt is required." >&2 + done + fi + prompt_required "$prompt" +} + +prompt_yn() { + local p="$1" + local def="${2:-y}" + local a + local hint="y/N" + [[ "$def" == "y" ]] && hint="Y/n" + read -r -p "$p [$hint]: " a + if [[ -z "$a" ]]; then + a="$def" + fi + [[ "$a" =~ ^[Yy]$ ]] +} + +ensure_gcloud_authenticated() { + local active_account + active_account="$(gcloud auth list --filter=status:ACTIVE --format='value(account)' 2>/dev/null || true)" + if [[ -n "$active_account" ]]; then + return 0 + fi + echo "gcloud is not authenticated." + if prompt_yn "Run 'gcloud auth login' now?" 
"y"; then + gcloud auth login || true + fi + active_account="$(gcloud auth list --filter=status:ACTIVE --format='value(account)' 2>/dev/null || true)" + if [[ -z "$active_account" ]]; then + echo "Unable to authenticate gcloud. Run 'gcloud auth login' and rerun." + exit 1 + fi +} + +ensure_gh_authenticated() { + if ! command -v gh >/dev/null 2>&1; then + prereqs_hint_gh_cli >&2 + return 1 + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh CLI is not authenticated." + if prompt_yn "Run 'gh auth login' now?" "y"; then + gh auth login || true + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh authentication is still missing. Skipping automatic GitHub setup." + return 1 +} + +cloud_sql_instance_exists() { + local project_id="$1" + local instance_name="$2" + gcloud sql instances describe "$instance_name" \ + --project "$project_id" \ + --format='value(name)' >/dev/null 2>&1 +} + +cloud_run_env_value() { + local project_id="$1" + local region="$2" + local service_name="$3" + local env_key="$4" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format=json 2>/dev/null | python3 - "$env_key" <<'PY' +import json +import sys + +env_key = sys.argv[1] +try: + data = json.load(sys.stdin) +except Exception: + print("") + raise SystemExit(0) + +containers = (data.get("spec", {}) or {}).get("template", {}).get("spec", {}).get("containers", []) +for c in containers: + for e in c.get("env", []) or []: + if e.get("name") == env_key: + print(e.get("value", "")) + raise SystemExit(0) +print("") +PY +} + +cloud_run_image_value() { + local project_id="$1" + local region="$2" + local service_name="$3" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format='value(spec.template.spec.containers[0].image)' 2>/dev/null || true +} + +slack_manifest_json_compact() { + local manifest_file="$1" + python3 - "$manifest_file" <<'PY' +import json 
+import sys +path = sys.argv[1] +with open(path, "r", encoding="utf-8") as f: + data = json.load(f) +print(json.dumps(data, separators=(",", ":"))) +PY +} + +slack_api_configure_from_manifest() { + local manifest_file="$1" + local install_url="$2" + local token app_id team_id manifest_json api_resp ok + + echo + echo "=== Slack App API ===" + + token="$(required_from_env_or_prompt "SLACK_API_TOKEN" "Slack API token (required scopes: apps.manifest:write)" "secret")" + app_id="$(prompt_line "Slack App ID (optional; blank = create new app)" "${SLACK_APP_ID:-}")" + team_id="$(prompt_line "Slack Team ID (optional; usually blank)" "${SLACK_TEAM_ID:-}")" + + manifest_json="$(slack_manifest_json_compact "$manifest_file" 2>/dev/null || true)" + if [[ -z "$manifest_json" ]]; then + echo "Could not parse manifest JSON automatically." + echo "Ensure $manifest_file is valid JSON and Python 3 is installed." + return 0 + fi + + if [[ -n "$app_id" ]]; then + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +print("ok" if data.get("ok") else f"error:{data.get('error','unknown_error')}") +PY +)" + if [[ "$ok" == "ok" ]]; then + echo "Slack app manifest updated for App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API update failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes 
(apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.update" + fi + return 0 + fi + + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +if not data.get("ok"): + print(f"error:{data.get('error','unknown_error')}") + sys.exit(0) +app_id = data.get("app_id") or (data.get("app", {}) or {}).get("id") or "" +print(f"ok:{app_id}") +PY +)" + if [[ "$ok" == ok:* ]]; then + app_id="${ok#ok:}" + echo "Slack app created successfully." + [[ -n "$app_id" ]] && echo "New Slack App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API create failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.create" + fi +} + +generate_stage_slack_manifest() { + local stage="$1" + local api_url="$2" + local install_url="$3" + local template="$REPO_ROOT/slack-manifest.json" + local manifest_out="$REPO_ROOT/slack-manifest_${stage}.json" + local events_url base_url oauth_redirect_url + + if [[ ! -f "$template" ]]; then + echo "Slack manifest template not found at $template" + return 0 + fi + if [[ -z "$api_url" ]]; then + echo "Could not determine API URL from service outputs. Skipping Slack manifest generation." + return 0 + fi + + events_url="${api_url%/}" + base_url="${events_url%/slack/events}" + oauth_redirect_url="${base_url}/slack/oauth_redirect" + + if ! 
python3 - "$template" "$manifest_out" "$events_url" "$oauth_redirect_url" <<'PY' +import json +import sys + +template_path, out_path, events_url, redirect_url = sys.argv[1:5] +with open(template_path, "r", encoding="utf-8") as f: + manifest = json.load(f) + +manifest.setdefault("oauth_config", {}).setdefault("redirect_urls", []) +manifest["oauth_config"]["redirect_urls"] = [redirect_url] +manifest.setdefault("settings", {}).setdefault("event_subscriptions", {}) +manifest["settings"]["event_subscriptions"]["request_url"] = events_url +manifest.setdefault("settings", {}).setdefault("interactivity", {}) +manifest["settings"]["interactivity"]["request_url"] = events_url + +with open(out_path, "w", encoding="utf-8") as f: + json.dump(manifest, f, indent=2) + f.write("\n") +PY + then + echo "Failed to generate stage Slack manifest from JSON template." + return 0 + fi + + SLACK_MANIFEST_GENERATED_PATH="$manifest_out" + + echo "=== Slack Manifest (${stage}) ===" + echo "Saved file: $manifest_out" + echo "Install URL: $install_url" + echo + sed 's/^/ /' "$manifest_out" +} + +write_deploy_receipt() { + local provider="$1" + local stage="$2" + local project_or_stack="$3" + local region="$4" + local service_url="$5" + local install_url="$6" + local manifest_path="$7" + local ts_human ts_file receipt_dir receipt_path + + ts_human="$(date -u +"%Y-%m-%d %H:%M:%S UTC")" + ts_file="$(date -u +"%Y%m%dT%H%M%SZ")" + receipt_dir="$REPO_ROOT/deploy-receipts" + receipt_path="$receipt_dir/deploy-${provider}-${stage}-${ts_file}.md" + + mkdir -p "$receipt_dir" + cat >"$receipt_path" </dev/null || true)" + artifact_registry_url="$(cd "$terraform_dir" && terraform output -raw artifact_registry_repository 2>/dev/null || true)" + service_url="$(cd "$terraform_dir" && terraform output -raw service_url 2>/dev/null || true)" + + echo + echo "=== GitHub Actions (GCP) ===" + echo "Detected project: $gcp_project_id" + echo "Detected region: $gcp_region" + echo "Detected service account: 
$deploy_sa_email" + echo "Detected artifact repo: $artifact_registry_url" + echo "Detected service URL: $service_url" + repo="$(gh repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" + if [[ -z "$repo" ]]; then + repo="$(prompt_line "GitHub repository (owner/repo) for environment setup" "")" + else + echo "Detected GitHub repository: $repo" + fi + + if ! ensure_gh_authenticated; then + echo + echo "Set these GitHub Actions Variables manually:" + echo " GCP_PROJECT_ID = $gcp_project_id" + echo " GCP_REGION = $gcp_region" + echo " GCP_SERVICE_ACCOUNT = $deploy_sa_email" + echo " DEPLOY_TARGET = gcp" + echo "Also set GCP_WORKLOAD_IDENTITY_PROVIDER for deploy-gcp.yml." + return 0 + fi + + if prompt_yn "Create/update GitHub environments 'test' and 'prod' now?" "y"; then + gh api -X PUT "repos/$repo/environments/test" >/dev/null + gh api -X PUT "repos/$repo/environments/prod" >/dev/null + echo "GitHub environments ensured: test, prod." + fi + + if prompt_yn "Set repo variables with gh now (GCP_PROJECT_ID, GCP_REGION, GCP_SERVICE_ACCOUNT, DEPLOY_TARGET=gcp)?" "y"; then + gh variable set GCP_PROJECT_ID --body "$gcp_project_id" + gh variable set GCP_REGION --body "$gcp_region" + [[ -n "$deploy_sa_email" ]] && gh variable set GCP_SERVICE_ACCOUNT --body "$deploy_sa_email" + gh variable set DEPLOY_TARGET --body "gcp" + echo "GitHub repository variables updated." + echo "Remember to set GCP_WORKLOAD_IDENTITY_PROVIDER." + fi + + if prompt_yn "Set environment variable STAGE_NAME for '$env_name' now?" "y"; then + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" + echo "Environment variable STAGE_NAME updated for '$env_name'." + fi +} + +echo "=== SyncBot GCP Deploy ===" +echo "Working directory: $GCP_DIR" +echo + +echo "=== Project And Region ===" +PROJECT_ID="$(prompt_line "GCP project_id" "${GCP_PROJECT_ID:-}")" +if [[ -z "$PROJECT_ID" ]]; then + echo "Error: project_id is required." 
>&2 + exit 1 +fi + +REGION="$(prompt_line "GCP region" "${GCP_REGION:-us-central1}")" +ensure_gcloud_authenticated +gcloud config set project "$PROJECT_ID" >/dev/null 2>&1 || true +STAGE="$(prompt_line "Stage (test/prod)" "${STAGE:-test}")" +if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then + echo "Error: stage must be 'test' or 'prod'." >&2 + exit 1 +fi +SERVICE_NAME="syncbot-${STAGE}" +EXISTING_SERVICE_URL="$(gcloud run services describe "$SERVICE_NAME" \ + --project "$PROJECT_ID" \ + --region "$REGION" \ + --format='value(status.url)' 2>/dev/null || true)" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + echo "Detected existing Cloud Run service: $SERVICE_NAME" + if ! prompt_yn "Continue and update this existing deployment?" "y"; then + echo "Aborted." + exit 0 + fi +fi + +echo +echo "=== Database Source ===" +# USE_EXISTING=true: point Terraform at an external DB only (use_existing_database); skip creating Cloud SQL. +# USE_EXISTING_DEFAULT: y/n default for the prompt when redeploying without a managed instance for this stage. +USE_EXISTING="false" +USE_EXISTING_DEFAULT="n" +DB_INSTANCE_NAME="${SERVICE_NAME}-db" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + if cloud_sql_instance_exists "$PROJECT_ID" "$DB_INSTANCE_NAME"; then + USE_EXISTING_DEFAULT="n" + echo "Detected managed Cloud SQL instance: $DB_INSTANCE_NAME" + else + USE_EXISTING_DEFAULT="y" + echo "No managed Cloud SQL instance found for stage; defaulting to existing DB mode." + fi +fi +if prompt_yn "Use existing database host (skip Cloud SQL creation)?" 
"$USE_EXISTING_DEFAULT"; then + USE_EXISTING="true" +fi + +EXISTING_HOST="" +EXISTING_SCHEMA="" +EXISTING_USER="" +DETECTED_EXISTING_HOST="" +DETECTED_EXISTING_SCHEMA="" +DETECTED_EXISTING_USER="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_EXISTING_HOST="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_HOST")" + DETECTED_EXISTING_SCHEMA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_SCHEMA")" + DETECTED_EXISTING_USER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_USER")" +fi +if [[ "$USE_EXISTING" == "true" ]]; then + EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" + EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" + EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" + if [[ -z "$EXISTING_HOST" ]]; then + echo "Error: Existing DB host is required when using existing database mode." >&2 + exit 1 + fi + if [[ -z "$EXISTING_USER" ]]; then + echo "Error: Database user is required when using existing database mode." >&2 + exit 1 + fi +fi + +DETECTED_CLOUD_IMAGE="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_CLOUD_IMAGE="$(cloud_run_image_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME")" +fi +CLOUD_IMAGE="$(prompt_line "cloud_run_image (blank = placeholder until first build)" "$DETECTED_CLOUD_IMAGE")" + +DETECTED_LOG_LEVEL="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_LOG_LEVEL="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "LOG_LEVEL")" +fi +LOG_LEVEL_DEFAULT="INFO" +if [[ -n "$DETECTED_LOG_LEVEL" ]]; then + LOG_LEVEL_DEFAULT="$(normalize_log_level "$DETECTED_LOG_LEVEL")" + if ! 
is_valid_log_level "$LOG_LEVEL_DEFAULT"; then + LOG_LEVEL_DEFAULT="INFO" + fi +fi + +echo +echo "=== Log Level ===" +LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" + +echo +echo "=== Terraform Init ===" +echo "Running: terraform init" +cd "$GCP_DIR" +terraform init + +VARS=( + "-var=project_id=$PROJECT_ID" + "-var=region=$REGION" + "-var=stage=$STAGE" + "-var=log_level=$LOG_LEVEL" +) + +if [[ "$USE_EXISTING" == "true" ]]; then + VARS+=("-var=use_existing_database=true") + VARS+=("-var=existing_db_host=$EXISTING_HOST") + VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") + VARS+=("-var=existing_db_user=$EXISTING_USER") +else + VARS+=("-var=use_existing_database=false") +fi + +if [[ -n "$CLOUD_IMAGE" ]]; then + VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") +fi + +echo +echo "Log level: $LOG_LEVEL" +echo +echo "=== Terraform Plan ===" +if ! prompt_yn "Run terraform plan?" "y"; then + echo "Skipped. Run manually from infra/gcp:" + echo " terraform plan ${VARS[*]}" + exit 0 +fi + +terraform plan "${VARS[@]}" + +echo +echo "=== Terraform Apply ===" +if ! prompt_yn "Apply changes (terraform apply)?" "y"; then + echo "Aborted." + exit 0 +fi + +terraform apply -auto-approve "${VARS[@]}" + +echo +echo "=== Apply Complete ===" +SERVICE_URL="$(terraform output -raw service_url 2>/dev/null || true)" +SYNCBOT_API_URL="" +SYNCBOT_INSTALL_URL="" +if [[ -n "$SERVICE_URL" ]]; then + SYNCBOT_API_URL="${SERVICE_URL%/}/slack/events" + SYNCBOT_INSTALL_URL="${SERVICE_URL%/}/slack/install" +fi +generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" +if [[ -n "$SLACK_MANIFEST_GENERATED_PATH" ]]; then + if prompt_yn "Configure Slack app via Slack API now (create or update from generated manifest)?" 
"n"; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" + fi +fi + +write_deploy_receipt \ + "gcp" \ + "$STAGE" \ + "$PROJECT_ID" \ + "$REGION" \ + "$SERVICE_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" + +echo "Next:" +echo " 1) Set Secret Manager values for Slack (see infra/gcp/README.md)." +echo " 2) Build and push container image; update cloud_run_image and re-apply if needed." +echo " 3) Run: ./infra/gcp/scripts/print-bootstrap-outputs.sh" +bash "$SCRIPT_DIR/print-bootstrap-outputs.sh" || true + +if prompt_yn "Set up GitHub Actions configuration now?" "n"; then + configure_github_actions_gcp "$PROJECT_ID" "$REGION" "$GCP_DIR" "$STAGE" +fi diff --git a/infra/gcp/scripts/print-bootstrap-outputs.sh b/infra/gcp/scripts/print-bootstrap-outputs.sh index 22f6515..6099532 100755 --- a/infra/gcp/scripts/print-bootstrap-outputs.sh +++ b/infra/gcp/scripts/print-bootstrap-outputs.sh @@ -2,6 +2,8 @@ # Print SyncBot GCP Terraform outputs for GitHub variables (WIF, deploy). # Run from repo root: infra/gcp/scripts/print-bootstrap-outputs.sh # Requires: terraform in PATH; run from repo root so infra/gcp is available. +# +# Flow: terraform output (full) -> suggested variable names for CI. set -euo pipefail @@ -13,7 +15,7 @@ if [[ ! -d "$GCP_DIR" ]] || [[ ! 
-f "$GCP_DIR/main.tf" ]]; then exit 1 fi -echo "GCP Terraform outputs (infra/gcp)" +echo "=== Terraform Outputs (Infra/GCP) ===" echo "" cd "$GCP_DIR" @@ -25,7 +27,7 @@ fi terraform output echo "" -echo "--- GitHub Actions variables (suggested) ---" +echo "=== Suggested GitHub Actions Variables ===" echo "GCP_PROJECT_ID = $(terraform output -raw project_id 2>/dev/null || echo '')" echo "GCP_REGION = $(terraform output -raw region 2>/dev/null || echo '')" echo "GCP_SERVICE_ACCOUNT = $(terraform output -raw deploy_service_account_email 2>/dev/null || echo '')" diff --git a/infra/gcp/tests/test_terraform_validate.py b/infra/gcp/tests/test_terraform_validate.py new file mode 100644 index 0000000..515f01a --- /dev/null +++ b/infra/gcp/tests/test_terraform_validate.py @@ -0,0 +1,53 @@ +"""Terraform validation for the module next to this package. + +``terraform init -backend=false`` may need network access to download providers. +Uses ``TF_DATA_DIR`` in a temp directory so the repo tree is not modified. 
+""" + +from __future__ import annotations + +import os +import shutil +import subprocess +import tempfile +from pathlib import Path + +import pytest + +INFRA_GCP = Path(__file__).resolve().parent.parent + + +def _which(name: str) -> str | None: + return shutil.which(name) + + +def test_terraform_validates() -> None: + tf = _which("terraform") + if not tf: + pytest.skip("terraform not on PATH") + assert INFRA_GCP.is_dir() + with tempfile.TemporaryDirectory() as tmp: + env = dict(os.environ) + env["TF_DATA_DIR"] = tmp + init = subprocess.run( + [tf, "init", "-backend=false", "-input=false"], + cwd=INFRA_GCP, + capture_output=True, + text=True, + env=env, + timeout=180, + ) + if init.returncode != 0: + pytest.skip( + "terraform init failed (terraform missing or no network for providers?):\n" + f"{init.stdout}\n{init.stderr}" + ) + validate = subprocess.run( + [tf, "validate"], + cwd=INFRA_GCP, + capture_output=True, + text=True, + env=env, + timeout=60, + ) + assert validate.returncode == 0, f"terraform validate failed:\n{validate.stdout}\n{validate.stderr}" diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index a71daef..1d65f45 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -1,4 +1,7 @@ # GCP Terraform variables for SyncBot (see docs/INFRA_CONTRACT.md) +# +# Sections: project / region / stage → database mode → Cloud Run → keep-warm → +# Secret Manager IDs and scope envs → optional overrides. variable "project_id" { type = string @@ -79,6 +82,17 @@ variable "cloud_run_max_instances" { description = "Maximum number of Cloud Run instances" } +variable "log_level" { + type = string + default = "INFO" + description = "Python logging level for the app (LOG_LEVEL). DEBUG, INFO, WARNING, ERROR, or CRITICAL." + + validation { + condition = contains(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], var.log_level) + error_message = "log_level must be DEBUG, INFO, WARNING, ERROR, or CRITICAL." 
+ } +} + # --------------------------------------------------------------------------- # Keep-warm (Cloud Scheduler) # --------------------------------------------------------------------------- @@ -108,19 +122,25 @@ variable "secret_slack_signing_secret" { variable "secret_slack_client_id" { type = string default = "syncbot-slack-client-id" - description = "Secret Manager secret ID for ENV_SLACK_CLIENT_ID" + description = "Secret Manager secret ID for SLACK_CLIENT_ID" } variable "secret_slack_client_secret" { type = string default = "syncbot-slack-client-secret" - description = "Secret Manager secret ID for ENV_SLACK_CLIENT_SECRET" + description = "Secret Manager secret ID for SLACK_CLIENT_SECRET" } -variable "secret_slack_scopes" { +variable "secret_slack_bot_scopes" { type = string default = "syncbot-slack-scopes" - description = "Secret Manager secret ID for ENV_SLACK_SCOPES" + description = "Secret Manager secret ID whose value is comma-separated bot OAuth scopes (runtime env SLACK_BOT_SCOPES)" +} + +variable "slack_user_scopes" { + type = string + default = "chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" + description = "Comma-separated user OAuth scopes for Cloud Run (SLACK_USER_SCOPES). Must match slack-manifest.json oauth_config.scopes.user and syncbot/slack_manifest_scopes.py USER_SCOPES; default matches repo standard (same string as AWS SAM SlackOauthUserScopes Default)." 
} variable "secret_token_encryption_key" { diff --git a/pyproject.toml b/pyproject.toml index 3ca7590..273be2a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,13 +22,13 @@ boto3 = "^1.28.57" pytest = "^9.0" [tool.pytest.ini_options] -testpaths = ["tests"] +testpaths = ["tests", "infra/aws/tests", "infra/gcp/tests"] pythonpath = ["syncbot", "infra/aws/db_setup"] [tool.ruff] target-version = "py312" line-length = 120 -src = ["syncbot", "tests"] +src = ["syncbot", "tests", "infra/aws/tests", "infra/gcp/tests"] [tool.ruff.lint] select = [ diff --git a/samconfig.toml b/samconfig.toml index e65ab67..97fc6e7 100644 --- a/samconfig.toml +++ b/samconfig.toml @@ -1,98 +1,43 @@ # SAM CLI configuration for local deployment. # -# Usage examples: -# sam build --config-env test-new-rds -# sam deploy --config-env test-new-rds +# Full list of CloudFormation parameters is in infra/aws/template.yaml (Parameters). +# Interactive deploy (infra/aws/scripts/deploy.sh) passes overrides on the CLI. +# This file is for quick sam build / sam deploy with minimal Stage-only overrides. # -# sam build --config-env test-existing-rds -# sam deploy --config-env test-existing-rds +# Usage: +# sam build --config-env test +# sam deploy --config-env test +# sam deploy --config-env prod # -# sam deploy --config-env prod-new-rds -# sam deploy --config-env prod-existing-rds -# -# NOTE: -# - Fill in placeholders (DB host/admin user) before using *-existing-rds configs. -# - Secrets like SlackSigningSecret, SlackClientSecret, and DatabasePassword are -# expected to be provided securely during deploy workflows. -# - For disaster recovery with existing encrypted tokens, add: -# TokenEncryptionKeyOverride= -# to the relevant environment's parameter_overrides. 
- version = 0.1 [default.build.parameters] template_file = "infra/aws/template.yaml" use_container = true -[default.deploy.parameters] -stack_name = "syncbot-test" -resolve_s3 = true -s3_prefix = "syncbot-test" -region = "us-east-2" -capabilities = "CAPABILITY_IAM" -confirm_changeset = true -parameter_overrides = "Stage=test" - -[test-new-rds.build.parameters] +[test.build.parameters] template_file = "infra/aws/template.yaml" use_container = true -[test-new-rds.deploy.parameters] -stack_name = "syncbot-test" -resolve_s3 = true -s3_prefix = "syncbot-test" -region = "us-east-2" -capabilities = "CAPABILITY_IAM" -confirm_changeset = true -parameter_overrides = "Stage=test DatabaseEngine=postgresql ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" - -[test-existing-rds.build.parameters] -template_file = "infra/aws/template.yaml" -use_container = true - -[test-existing-rds.deploy.parameters] +# test: minimal Stage; add other template parameters via --parameter-overrides or guided deploy. 
+[test.deploy.parameters] stack_name = "syncbot-test" resolve_s3 = true s3_prefix = "syncbot-test" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=test DatabaseEngine=postgresql ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" - -[prod-new-rds.build.parameters] -template_file = "infra/aws/template.yaml" -use_container = true - -[prod-new-rds.deploy.parameters] -stack_name = "syncbot-prod" -resolve_s3 = true -s3_prefix = "syncbot-prod" -region = "us-east-2" -capabilities = "CAPABILITY_IAM" -confirm_changeset = true -parameter_overrides = "Stage=prod DatabaseEngine=postgresql ExistingDatabaseHost= ExistingDatabaseAdminUser= ExistingDatabaseAdminPassword=" +parameter_overrides = "Stage=test" -[prod-existing-rds.build.parameters] +[prod.build.parameters] template_file = "infra/aws/template.yaml" use_container = true -[prod-existing-rds.deploy.parameters] +[prod.deploy.parameters] stack_name = "syncbot-prod" resolve_s3 = true s3_prefix = "syncbot-prod" region = "us-east-2" capabilities = "CAPABILITY_IAM" confirm_changeset = true -parameter_overrides = "Stage=prod DatabaseEngine=postgresql ExistingDatabaseHost=REPLACE_ME_RDS_HOST ExistingDatabaseAdminUser=REPLACE_ME_ADMIN_USER ExistingDatabaseAdminPassword=REPLACE_ME_ADMIN_PASSWORD" - -[test.deploy.parameters] -stack_name = "syncbot-test" -resolve_s3 = true -s3_prefix = "syncbot-test" -confirm_changeset = true -capabilities = "CAPABILITY_IAM" -parameter_overrides = "Stage=\"test\" SlackClientID=\"10361912548384.10524713300870\" SlackOauthScopes=\"app_mentions:read,channels:history,channels:join,chat:write,chat:write:user,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write\" ExistingDatabaseHost=\"f3ttown1.c7im4saakwcy.us-east-2.rds.amazonaws.com\" 
ExistingDatabaseAdminUser=\"f3ttown1dba\" DatabaseUser=\"syncbot_test_user\" DatabaseSchema=\"syncbot_test\" DatabaseInstanceClass=\"db.t3.micro\" AllowedDBCidr=\"0.0.0.0/0\" VpcCidr=\"10.0.0.0/16\" RequireAdmin=\"true\"" -image_repositories = [] - -[test.global.parameters] -region = "us-east-2" +parameter_overrides = "Stage=prod" diff --git a/slack-manifest.json b/slack-manifest.json new file mode 100644 index 0000000..1b24964 --- /dev/null +++ b/slack-manifest.json @@ -0,0 +1,88 @@ +{ + "_metadata": { + "major_version": 1, + "minor_version": 1 + }, + "display_information": { + "name": "SyncBot", + "description": "Sync chat threads between Slack Workspaces.", + "background_color": "#1A1D21" + }, + "features": { + "app_home": { + "home_tab_enabled": true, + "messages_tab_enabled": true, + "messages_tab_read_only_enabled": true + }, + "bot_user": { + "display_name": "SyncBot", + "always_online": true + } + }, + "oauth_config": { + "redirect_urls": [ + "https://your-random-tunnel-name.trycloudflare.com/slack/oauth_redirect" + ], + "scopes": { + "bot": [ + "app_mentions:read", + "channels:history", + "channels:join", + "channels:read", + "channels:manage", + "chat:write", + "chat:write.customize", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email" + ], + "user": [ + "chat:write", + "channels:history", + "channels:read", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email" + ] + } + }, + "settings": { + "event_subscriptions": { + "request_url": "https://your-random-tunnel-name.trycloudflare.com/slack/events", + "bot_events": [ + "app_home_opened", + "member_joined_channel", + "message.channels", + "message.groups", + "reaction_added", + "reaction_removed", + "team_join", + 
"tokens_revoked", + "user_profile_changed" + ] + }, + "interactivity": { + "is_enabled": true, + "request_url": "https://your-random-tunnel-name.trycloudflare.com/slack/events" + }, + "org_deploy_enabled": false, + "socket_mode_enabled": false, + "token_rotation_enabled": false + } +} diff --git a/slack-manifest.yaml b/slack-manifest.yaml deleted file mode 100644 index 7dc6694..0000000 --- a/slack-manifest.yaml +++ /dev/null @@ -1,79 +0,0 @@ -_metadata: - major_version: 1 - minor_version: 1 - -display_information: - name: SyncBot - description: Sync chat threads between Slack Workspaces. - background_color: "#1A1D21" - -features: - app_home: - home_tab_enabled: true - messages_tab_enabled: true - messages_tab_read_only_enabled: true - bot_user: - display_name: SyncBot - always_online: true - -oauth_config: - redirect_urls: - - https://your-random-tunnel-name.trycloudflare.com/slack/oauth_redirect - scopes: - bot: - - app_mentions:read - - channels:history - - channels:join - - channels:read - - channels:manage - - chat:write - - chat:write.customize - - files:read - - files:write - - groups:history - - groups:read - - groups:write - - im:write - - reactions:read - - reactions:write - - team:read - - users:read - - users:read.email - user: - - app_mentions:read - - channels:history - - channels:join - - channels:read - - channels:manage - - files:read - - files:write - - groups:history - - groups:read - - groups:write - - im:write - - chat:write:user - - reactions:read - - reactions:write - - team:read - - users:read - - users:read.email - -settings: - event_subscriptions: - request_url: https://your-random-tunnel-name.trycloudflare.com/slack/events - bot_events: - - app_home_opened - - member_joined_channel - - message.channels - - message.groups - - reaction_added - - reaction_removed - - team_join - - tokens_revoked - - user_profile_changed - interactivity: - is_enabled: true - request_url: https://your-random-tunnel-name.trycloudflare.com/slack/events - 
org_deploy_enabled: false - socket_mode_enabled: false - token_rotation_enabled: false diff --git a/syncbot/constants.py b/syncbot/constants.py index 918a2eb..c7ec5e5 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -21,9 +21,10 @@ # --------------------------------------------------------------------------- SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN" -SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" -SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" -SLACK_SCOPES = "ENV_SLACK_SCOPES" +SLACK_CLIENT_ID = "SLACK_CLIENT_ID" +SLACK_CLIENT_SECRET = "SLACK_CLIENT_SECRET" +SLACK_BOT_SCOPES = "SLACK_BOT_SCOPES" +SLACK_USER_SCOPES = "SLACK_USER_SCOPES" SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET" TOKEN_ENCRYPTION_KEY = "TOKEN_ENCRYPTION_KEY" REQUIRE_ADMIN = "REQUIRE_ADMIN" @@ -100,9 +101,9 @@ def _has_real_bot_token() -> bool: def get_database_backend() -> str: """Return ``postgresql``, ``mysql``, or ``sqlite``. - Defaults to ``postgresql`` (Aurora DSQL / RDS PostgreSQL) when unset. + Defaults to ``mysql`` when unset. """ - return os.environ.get(DATABASE_BACKEND, "postgresql").lower().strip() or "postgresql" + return os.environ.get(DATABASE_BACKEND, "mysql").lower().strip() or "mysql" def _env_bool(name: str, default: bool) -> bool: @@ -154,7 +155,7 @@ def get_required_db_vars() -> list: SLACK_SIGNING_SECRET, SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, - SLACK_SCOPES, + SLACK_BOT_SCOPES, TOKEN_ENCRYPTION_KEY, ] diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index f5ecfcc..ad37a9b 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -43,8 +43,14 @@ class DatabaseField: _MAX_RETRIES = 2 _DB_INIT_MAX_ATTEMPTS = 15 _DB_INIT_RETRY_SECONDS = 2 -_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent -_ALEMBIC_SCRIPT_LOCATION = _PROJECT_ROOT / "db" / "alembic" +# Migrations live next to this package so they are included in the Lambda bundle (SAM CodeUri: syncbot/). 
+_ALEMBIC_SCRIPT_LOCATION = Path(__file__).resolve().parent / "alembic" + +# Repo root locally; Lambda deployment root (/var/task) in AWS — used for relative SQLite paths. +_syncbot_dir = Path(__file__).resolve().parent.parent +_PROJECT_ROOT = ( + _syncbot_dir if os.environ.get("AWS_LAMBDA_FUNCTION_NAME") else _syncbot_dir.parent +) def _mysql_port() -> str: @@ -76,7 +82,7 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: def _build_postgresql_url(include_schema: bool = False) -> tuple[str, dict]: - """Build PostgreSQL URL and connect_args from DATABASE_* env vars (RDS / Aurora DSQL).""" + """Build PostgreSQL URL and connect_args from DATABASE_* env vars.""" host = os.environ[constants.DATABASE_HOST] user = quote_plus(os.environ[constants.DATABASE_USER]) passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) @@ -186,7 +192,7 @@ def _ensure_database_exists() -> None: def _alembic_config(): - """Build Alembic config with script_location set to project db/alembic.""" + """Build Alembic config with script_location set to syncbot/db/alembic.""" from alembic.config import Config # pyright: ignore[reportMissingImports] config = Config() config.set_main_option("script_location", str(_ALEMBIC_SCRIPT_LOCATION)) diff --git a/db/alembic/env.py b/syncbot/db/alembic/env.py similarity index 72% rename from db/alembic/env.py rename to syncbot/db/alembic/env.py index 8b8ffbc..155e952 100644 --- a/db/alembic/env.py +++ b/syncbot/db/alembic/env.py @@ -1,17 +1,23 @@ -"""Alembic env: use SyncBot's engine from db.get_engine(). Run from project root with syncbot on PYTHONPATH.""" +"""Alembic env: use SyncBot's engine from db.get_engine(). + +Run from repo root: ``alembic -c alembic.ini upgrade head`` +(with ``syncbot/`` on ``PYTHONPATH`` via ``prepend_sys_path`` in alembic.ini). 
+""" import sys from pathlib import Path -# Project root (db/alembic/env.py -> db -> project root) -_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent -if str(_PROJECT_ROOT) not in sys.path: - sys.path.insert(0, str(_PROJECT_ROOT)) +# syncbot/db/alembic/env.py -> syncbot/ (directory that must be on PYTHONPATH for ``import db``) +_SYNCBOT_DIR = Path(__file__).resolve().parent.parent.parent +_REPO_ROOT = _SYNCBOT_DIR.parent +if str(_SYNCBOT_DIR) not in sys.path: + sys.path.insert(0, str(_SYNCBOT_DIR)) # Load .env when running via CLI (alembic upgrade head) try: from dotenv import load_dotenv - load_dotenv(_PROJECT_ROOT / ".env") + + load_dotenv(_REPO_ROOT / ".env") except ImportError: pass diff --git a/db/alembic/script.py.mako b/syncbot/db/alembic/script.py.mako similarity index 100% rename from db/alembic/script.py.mako rename to syncbot/db/alembic/script.py.mako diff --git a/db/alembic/versions/001_baseline.py b/syncbot/db/alembic/versions/001_baseline.py similarity index 100% rename from db/alembic/versions/001_baseline.py rename to syncbot/db/alembic/versions/001_baseline.py diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py index 4b67906..9ecedc3 100644 --- a/syncbot/helpers/oauth.py +++ b/syncbot/helpers/oauth.py @@ -1,4 +1,9 @@ -"""Slack OAuth flow construction.""" +"""Slack OAuth flow construction. + +Bot scopes: :envvar:`SLACK_BOT_SCOPES` (``slack_manifest_scopes.BOT_SCOPES`` / manifest bot). +User scopes: :envvar:`SLACK_USER_SCOPES` (defaults to ``USER_SCOPES`` when unset). +Requesting user scopes that do not match the Slack app manifest causes ``invalid_scope`` on install. 
+""" import logging import os @@ -9,6 +14,7 @@ from slack_sdk.oauth.state_store.sqlalchemy import SQLAlchemyOAuthStateStore import constants +from slack_manifest_scopes import USER_SCOPES _logger = logging.getLogger(__name__) @@ -24,7 +30,8 @@ def get_oauth_flow(): """ client_id = os.environ.get(constants.SLACK_CLIENT_ID, "").strip() client_secret = os.environ.get(constants.SLACK_CLIENT_SECRET, "").strip() - scopes_raw = os.environ.get(constants.SLACK_SCOPES, "").strip() + scopes_raw = os.environ.get(constants.SLACK_BOT_SCOPES, "").strip() + user_scopes_raw = os.environ.get(constants.SLACK_USER_SCOPES, "").strip() if constants.LOCAL_DEVELOPMENT and not (client_id and client_secret and scopes_raw): _logger.info("OAuth credentials not set — running in single-workspace mode") @@ -42,11 +49,19 @@ def get_oauth_flow(): engine=engine, ) + bot_scopes = [s.strip() for s in scopes_raw.split(",") if s.strip()] + user_scopes = ( + [s.strip() for s in user_scopes_raw.split(",") if s.strip()] + if user_scopes_raw + else list(USER_SCOPES) + ) + return OAuthFlow( settings=OAuthSettings( client_id=client_id, client_secret=client_secret, - scopes=scopes_raw.split(","), + scopes=bot_scopes, + user_scopes=user_scopes, installation_store=installation_store, state_store=state_store, ), diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index ea3a34b..f40c23b 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,3 +1,4 @@ +alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" certifi==2026.1.4 ; python_version >= "3.12" and python_version < "4.0" cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" charset-normalizer==3.4.4 ; python_version >= "3.12" and python_version < "4.0" diff --git a/syncbot/slack_manifest_scopes.py b/syncbot/slack_manifest_scopes.py new file mode 100644 index 0000000..35a8bcf --- /dev/null +++ b/syncbot/slack_manifest_scopes.py @@ -0,0 +1,63 @@ +"""Canonical Slack OAuth scopes — keep in sync with 
repo root ``slack-manifest.json``. + +``oauth_config.scopes.bot`` must match :envvar:`SLACK_BOT_SCOPES` (comma-separated). +``oauth_config.scopes.user`` must match :envvar:`SLACK_USER_SCOPES` (comma-separated). +This app always uses both **bot** and **user** scopes; ``USER_SCOPES`` is non-empty and must +match the manifest ``user`` array (order included). When changing scopes, edit this module and +``slack-manifest.json`` / ``slack-manifest_test.json`` together, then AWS SAM defaults, +GCP ``slack_user_scopes``, and env examples. +""" + +from __future__ import annotations + +# --- Must match slack-manifest.json oauth_config.scopes.bot (order as in manifest) --- + +BOT_SCOPES: tuple[str, ...] = ( + "app_mentions:read", + "channels:history", + "channels:join", + "channels:read", + "channels:manage", + "chat:write", + "chat:write.customize", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email", +) + +# --- Must match slack-manifest.json oauth_config.scopes.user (order as in manifest) --- + +USER_SCOPES: tuple[str, ...] 
= ( + "chat:write", + "channels:history", + "channels:read", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email", +) + + +def bot_scopes_comma_separated() -> str: + """Return the bot scope string for SLACK_BOT_SCOPES / CloudFormation.""" + return ",".join(BOT_SCOPES) + + +def user_scopes_comma_separated() -> str: + """Return the user scope string for SLACK_USER_SCOPES / CloudFormation / Terraform.""" + return ",".join(USER_SCOPES) diff --git a/tests/test_db.py b/tests/test_db.py index 3cb9dd5..3602fef 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -237,7 +237,7 @@ def test_get_required_db_vars_postgresql_without_url(self): assert "DATABASE_PASSWORD" in required assert "DATABASE_SCHEMA" in required - def test_default_database_backend_is_postgresql(self): + def test_default_database_backend_is_mysql(self): import importlib import constants as c @@ -245,7 +245,7 @@ def test_default_database_backend_is_postgresql(self): old = os.environ.pop("DATABASE_BACKEND", None) try: importlib.reload(c) - assert c.get_database_backend() == "postgresql" + assert c.get_database_backend() == "mysql" finally: if old is not None: os.environ["DATABASE_BACKEND"] = old diff --git a/tests/test_deploy_script_syntax.py b/tests/test_deploy_script_syntax.py new file mode 100644 index 0000000..b5d06f3 --- /dev/null +++ b/tests/test_deploy_script_syntax.py @@ -0,0 +1,25 @@ +"""Smoke-check deploy shell scripts parse with bash -n.""" + +import subprocess +from pathlib import Path + +import pytest + +REPO_ROOT = Path(__file__).resolve().parent.parent + +DEPLOY_SCRIPTS = [ + REPO_ROOT / "deploy.sh", + REPO_ROOT / "infra" / "gcp" / "scripts" / "deploy.sh", + REPO_ROOT / "infra" / "aws" / "scripts" / "deploy.sh", +] + + +@pytest.mark.parametrize("path", DEPLOY_SCRIPTS, ids=lambda p: str(p.relative_to(REPO_ROOT))) +def test_bash_syntax(path: Path) -> 
None: + assert path.is_file(), f"missing {path}" + subprocess.run( + ["bash", "-n", str(path)], + check=True, + capture_output=True, + text=True, + ) diff --git a/tests/test_oauth.py b/tests/test_oauth.py index ab62700..6d9bb83 100644 --- a/tests/test_oauth.py +++ b/tests/test_oauth.py @@ -3,6 +3,8 @@ import os from unittest.mock import patch +from slack_manifest_scopes import USER_SCOPES + os.environ.setdefault("DATABASE_HOST", "localhost") os.environ.setdefault("DATABASE_USER", "root") os.environ.setdefault("DATABASE_PASSWORD", "test") @@ -22,9 +24,9 @@ def test_local_dev_without_oauth_credentials_returns_none(self): @patch.dict( os.environ, { - "ENV_SLACK_CLIENT_ID": "cid", - "ENV_SLACK_CLIENT_SECRET": "csecret", - "ENV_SLACK_SCOPES": "chat:write,channels:read", + "SLACK_CLIENT_ID": "cid", + "SLACK_CLIENT_SECRET": "csecret", + "SLACK_BOT_SCOPES": "chat:write,channels:read", }, clear=True, ) @@ -51,9 +53,9 @@ def test_local_dev_with_credentials_uses_sql_stores( @patch.dict( os.environ, { - "ENV_SLACK_CLIENT_ID": "prod-cid", - "ENV_SLACK_CLIENT_SECRET": "prod-secret", - "ENV_SLACK_SCOPES": "chat:write,groups:read", + "SLACK_CLIENT_ID": "prod-cid", + "SLACK_CLIENT_SECRET": "prod-secret", + "SLACK_BOT_SCOPES": "chat:write,groups:read", }, clear=True, ) @@ -72,6 +74,35 @@ def test_production_uses_sql_stores_without_s3( flow = get_oauth_flow() assert flow is not None + assert flow.settings.scopes == ["chat:write", "groups:read"] + assert flow.settings.user_scopes == list(USER_SCOPES) mock_get_engine.assert_called_once_with() mock_installation_store_cls.assert_called_once_with(client_id="prod-cid", engine=engine) mock_state_store_cls.assert_called_once_with(expiration_seconds=600, engine=engine) + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", True) + @patch.dict( + os.environ, + { + "SLACK_CLIENT_ID": "cid", + "SLACK_CLIENT_SECRET": "csecret", + "SLACK_BOT_SCOPES": "chat:write", + "SLACK_USER_SCOPES": "chat:write,users:read", + }, + clear=True, + ) + 
@patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_slack_user_scopes_env_overrides_default( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + mock_get_engine.return_value = object() + + flow = get_oauth_flow() + + assert flow is not None + assert flow.settings.user_scopes == ["chat:write", "users:read"] diff --git a/tests/test_slack_manifest_scopes.py b/tests/test_slack_manifest_scopes.py new file mode 100644 index 0000000..9153d84 --- /dev/null +++ b/tests/test_slack_manifest_scopes.py @@ -0,0 +1,62 @@ +"""slack-manifest.json stays aligned with syncbot/slack_manifest_scopes.py.""" + +import json +import re +from pathlib import Path + +from slack_manifest_scopes import ( + BOT_SCOPES, + USER_SCOPES, + bot_scopes_comma_separated, + user_scopes_comma_separated, +) + + +def _manifest() -> dict: + root = Path(__file__).resolve().parent.parent + return json.loads(root.joinpath("slack-manifest.json").read_text(encoding="utf-8")) + + +def test_slack_manifest_bot_scopes_match_constants(): + bot = _manifest()["oauth_config"]["scopes"]["bot"] + assert bot == list(BOT_SCOPES) + + +def test_slack_manifest_user_scopes_match_constants(): + user = _manifest()["oauth_config"]["scopes"]["user"] + assert user == list(USER_SCOPES) + + +def test_sam_template_slack_oauth_default_matches_bot_scopes(): + """infra/aws/template.yaml SlackOauthBotScopes Default must match BOT_SCOPES.""" + root = Path(__file__).resolve().parent.parent + text = root.joinpath("infra/aws/template.yaml").read_text(encoding="utf-8") + m = re.search( + r'^\s*SlackOauthBotScopes:\s*\n(?:^\s+.*\n)*?\s*Default:\s*"([^"]+)"', + text, + re.MULTILINE, + ) + assert m, "SlackOauthBotScopes Default not found in template.yaml" + assert m.group(1) == bot_scopes_comma_separated() + + +def test_sam_template_slack_user_oauth_default_matches_user_scopes(): + """infra/aws/template.yaml 
SlackOauthUserScopes Default must match USER_SCOPES.""" + root = Path(__file__).resolve().parent.parent + text = root.joinpath("infra/aws/template.yaml").read_text(encoding="utf-8") + m = re.search( + r'^\s*SlackOauthUserScopes:\s*\n(?:^\s+.*\n)*?\s*Default:\s*"([^"]*)"', + text, + re.MULTILINE, + ) + assert m, "SlackOauthUserScopes Default not found in template.yaml" + assert m.group(1) == user_scopes_comma_separated() + + +def test_bot_scopes_comma_separated_roundtrip(): + assert bot_scopes_comma_separated().split(",") == list(BOT_SCOPES) + + +def test_user_scopes_comma_separated_roundtrip(): + s = user_scopes_comma_separated() + assert [x.strip() for x in s.split(",") if x.strip()] == list(USER_SCOPES) From 8f2704cf8928d65574698b08f9a70bfd56ccc070 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 23 Mar 2026 23:04:31 -0500 Subject: [PATCH 21/45] Fixed GitHub part of deploy script. --- deploy.sh | 150 +++++++++++++++++++ docs/DEPLOYMENT.md | 4 + docs/INFRA_CONTRACT.md | 2 + infra/aws/scripts/deploy.sh | 59 ++++---- infra/aws/scripts/print-bootstrap-outputs.sh | 2 +- infra/gcp/scripts/deploy.sh | 17 +-- 6 files changed, 190 insertions(+), 44 deletions(-) diff --git a/deploy.sh b/deploy.sh index 664a012..4a9a120 100755 --- a/deploy.sh +++ b/deploy.sh @@ -234,6 +234,156 @@ prompt_log_level() { done } +# Parse owner/repo from a github.com git remote URL (ssh, https, ssh://). Empty if not GitHub. +github_owner_repo_from_url() { + local url="$1" + url="${url%.git}" + url="${url%/}" + if [[ "$url" =~ ^git@github\.com:([^/]+)/(.+)$ ]]; then + echo "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}" + return 0 + fi + if [[ "$url" =~ ^ssh://git@github\.com/([^/]+)/(.+)$ ]]; then + echo "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}" + return 0 + fi + if [[ "$url" =~ ^https://([^/@]+@)?github\.com/([^/]+)/([^/]+)$ ]]; then + echo "${BASH_REMATCH[2]}/${BASH_REMATCH[3]}" + return 0 + fi + return 1 +} + +# Emit owner/repo for GitHub Actions variables. 
Uses git remotes (origin, upstream, others) so forks +# are not confused with `gh repo view` (which often follows upstream). If there are no github.com +# remotes, falls back to `gh repo view` or a manual prompt. Prints chosen repo to stdout; hints to stderr. +prompt_github_repo_for_actions() { + local git_dir="${1:-$REPO_ROOT}" + local canon tmp url or n gh_inf nlines choice i line or_only lab_only + local _cr_done + _cr_done() { + rm -f "$canon" "$tmp" + } + canon="$(mktemp)" + tmp="$(mktemp)" + + if ! git -C "$git_dir" rev-parse --git-dir >/dev/null 2>&1; then + echo "Not a git checkout; enter GitHub owner/repo manually." >&2 + while true; do + read -r -p "GitHub repository (owner/repo): " choice + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + _cr_done + echo "$choice" + return 0 + fi + echo "Expected owner/repo (e.g. myorg/syncbot)." >&2 + done + fi + + _github_repo_add_unique() { + local o="$1" + local label="$2" + [[ -z "$o" ]] && return + if ! grep -Fxq "$o" "$tmp" 2>/dev/null; then + echo "$o" >>"$tmp" + printf '%s\t%s\n' "$o" "$label" >>"$canon" + fi + } + + for n in origin upstream; do + url="$(git -C "$git_dir" remote get-url "$n" 2>/dev/null || true)" + or="$(github_owner_repo_from_url "$url" || true)" + _github_repo_add_unique "$or" "git remote $n" + done + while IFS= read -r n; do + [[ "$n" == "origin" || "$n" == "upstream" ]] && continue + url="$(git -C "$git_dir" remote get-url "$n" 2>/dev/null || true)" + or="$(github_owner_repo_from_url "$url" || true)" + _github_repo_add_unique "$or" "git remote $n" + done < <(git -C "$git_dir" remote 2>/dev/null | LC_ALL=C sort) + + # Do not merge in `gh repo view` when remotes exist: gh often tracks upstream and + # disagrees with the fork (origin) the user wants for Actions variables. 
+ + nlines="$(wc -l <"$canon" | tr -d ' ')" + gh_inf="" + if [[ "$nlines" -eq 0 ]] && command -v gh >/dev/null 2>&1; then + gh_inf="$(gh -C "$git_dir" repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" + fi + + if [[ "$nlines" -eq 0 ]]; then + if [[ -n "$gh_inf" ]]; then + read -r -p "GitHub repository for Actions variables [$gh_inf] (from gh; no github.com remotes): " choice + choice="${choice:-$gh_inf}" + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Using GitHub repository: $gh_inf" >&2 + _cr_done + echo "$gh_inf" + return 0 + fi + echo "Could not detect owner/repo from remotes. Enter it manually." >&2 + while true; do + read -r -p "GitHub repository (owner/repo): " choice + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + _cr_done + echo "$choice" + return 0 + fi + echo "Expected owner/repo (e.g. myorg/syncbot)." >&2 + done + fi + + if [[ "$nlines" -eq 1 ]]; then + IFS=$'\t' read -r or_only lab_only <"$canon" + read -r -p "GitHub repository for Actions variables [$or_only] ($lab_only): " choice + choice="${choice:-$or_only}" + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Invalid owner/repo; using $or_only." >&2 + _cr_done + echo "$or_only" + return 0 + fi + + echo "Multiple GitHub repositories detected (fork vs upstream, etc.). 
Choose where to set Actions variables and secrets:" >&2 + i=1 + while IFS=$'\t' read -r or lab_only; do + echo " $i) $or ($lab_only)" >&2 + i=$((i + 1)) + done <"$canon" + + while true; do + read -r -p "Enter number [1-$nlines] or owner/repo: " choice + [[ -z "$choice" ]] && choice=1 + if [[ "$choice" =~ ^[0-9]+$ ]]; then + line="$(sed -n "${choice}p" "$canon")" + if [[ -n "$line" ]]; then + IFS=$'\t' read -r or_only lab_only <<<"$line" + echo "Using GitHub repository: $or_only" >&2 + _cr_done + echo "$or_only" + return 0 + fi + fi + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Invalid choice. Enter 1-$nlines or owner/repo." >&2 + done +} + # When sourced by infra/*/scripts/deploy.sh, only load helpers above. if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then return 0 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 9549c0e..1a062a1 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -155,6 +155,8 @@ Workflow: `.github/workflows/deploy-aws.yml` (runs on push to `test`/`prod` when Configure **repository** variables: `AWS_ROLE_TO_ASSUME`, `AWS_S3_BUCKET`, `AWS_REGION`. +`AWS_S3_BUCKET` is the bootstrap **SAM deploy artifact** bucket (`DeploymentBucketName`): CI uses it for `sam deploy --s3-bucket` (Lambda package uploads) only. It is **not** for Slack file hosting or other app media. The guided deploy script resolves the target repo from **git remotes** (origin, upstream, then others): if your fork and upstream differ, it asks which `owner/repo` should receive variables, then passes `-R owner/repo` to `gh` so writes go there (not whatever `gh` infers from context alone). 
+ Configure **per-environment** (`test` / `prod`) variables and secrets so they match your stack — especially if you use **existing RDS** or **private** networking: | Type | Name | Notes | @@ -215,6 +217,8 @@ Set Secret Manager values for Slack/DB as in [infra/gcp/README.md](../infra/gcp/ 2. Set **`DEPLOY_TARGET=gcp`** at repo level so `deploy-gcp.yml` runs and `deploy-aws.yml` is skipped. 3. Set variables: `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_WORKLOAD_IDENTITY_PROVIDER`, `GCP_SERVICE_ACCOUNT`, etc. + The interactive `infra/gcp/scripts/deploy.sh` uses the same GitHub `owner/repo` selection as the AWS script (based on git remotes when fork and upstream differ). + **Note:** `.github/workflows/deploy-gcp.yml` may still contain **placeholder** steps in upstream; replace with real **build + push + Cloud Run deploy** (or `terraform apply` with a new image tag) in your fork. The guided `infra/gcp/scripts/deploy.sh` is the source of truth for an interactive path. ### 3. Ongoing deploys diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index ca77f0a..f820b87 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -112,6 +112,8 @@ After running provider-specific bootstrap (e.g. AWS CloudFormation bootstrap sta | `region` | Primary region for the deployment | GitHub variable (e.g. `AWS_REGION`, `GCP_REGION`) | | `service_url` | Public base URL of the deployed app (optional at bootstrap; may come from app stack) | For Slack app configuration and docs | +**AWS:** `artifact_bucket` is `DeploymentBucketName` in bootstrap outputs; this repo stores it as the GitHub variable `AWS_S3_BUCKET` (SAM/CI packaging for `sam deploy` only; not Slack or app media). + Provider-specific implementations may use different names (e.g. `GitHubDeployRoleArn`, `DeploymentBucketName`) but should document the mapping to this contract. 
## Swapping Providers diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index bb5bc70..ab776e9 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -358,20 +358,15 @@ configure_github_actions_aws() { echo echo "=== GitHub Actions (AWS) ===" echo "Detected bootstrap role: $role" - echo "Detected deploy bucket: $bucket" + echo "Detected deploy bucket: $bucket (SAM/CI packaging for sam deploy — not Slack or app media)" echo "Detected bootstrap region: $boot_region" - repo="$(gh repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" - if [[ -z "$repo" ]]; then - repo="$(prompt_required "GitHub repository (owner/repo) for environment setup")" - else - echo "Detected GitHub repository: $repo" - fi + repo="$(prompt_github_repo_for_actions "$REPO_ROOT")" if ! ensure_gh_authenticated; then echo - echo "Set these GitHub Actions Variables manually:" + echo "Set these GitHub Actions Variables manually (on the repo you intend):" echo " AWS_ROLE_TO_ASSUME = $role" - echo " AWS_S3_BUCKET = $bucket" + echo " AWS_S3_BUCKET = $bucket (SAM deploy artifact bucket / DeploymentBucketName; not Slack file storage)" echo " AWS_REGION = $boot_region" echo "For environment '$env_name' also set AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DATABASE_ENGINE," echo "and (if using existing RDS) EXISTING_DATABASE_* / private VPC vars — see docs/DEPLOYMENT.md." @@ -384,36 +379,36 @@ configure_github_actions_aws() { echo "GitHub environments ensured: test, prod." fi - if prompt_yes_no "Set repo variables with gh now (AWS_ROLE_TO_ASSUME, AWS_S3_BUCKET, AWS_REGION)?" "y"; then - [[ -n "$role" ]] && gh variable set AWS_ROLE_TO_ASSUME --body "$role" - [[ -n "$bucket" ]] && gh variable set AWS_S3_BUCKET --body "$bucket" - [[ -n "$boot_region" ]] && gh variable set AWS_REGION --body "$boot_region" + if prompt_yes_no "Set repo variables with gh now (AWS_ROLE_TO_ASSUME, AWS_S3_BUCKET, AWS_REGION)? 
AWS_S3_BUCKET is SAM/CI packaging only (DeploymentBucketName)." "y"; then + [[ -n "$role" ]] && gh variable set AWS_ROLE_TO_ASSUME --body "$role" -R "$repo" + [[ -n "$bucket" ]] && gh variable set AWS_S3_BUCKET --body "$bucket" -R "$repo" + [[ -n "$boot_region" ]] && gh variable set AWS_REGION --body "$boot_region" -R "$repo" echo "GitHub repository variables updated." fi if prompt_yes_no "Set environment variables for '$env_name' now (AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DB host/user vars)?" "y"; then - gh variable set AWS_STACK_NAME --env "$env_name" --body "$app_stack_name" - gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" - gh variable set DATABASE_SCHEMA --env "$env_name" --body "$database_schema" - gh variable set DATABASE_ENGINE --env "$env_name" --body "$database_engine" + gh variable set AWS_STACK_NAME --env "$env_name" --body "$app_stack_name" -R "$repo" + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" -R "$repo" + gh variable set DATABASE_SCHEMA --env "$env_name" --body "$database_schema" -R "$repo" + gh variable set DATABASE_ENGINE --env "$env_name" --body "$database_engine" -R "$repo" if [[ "$db_mode" == "2" ]]; then - gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "$existing_db_host" - gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "$existing_db_admin_user" - gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "$existing_db_network_mode" + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "$existing_db_host" -R "$repo" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "$existing_db_admin_user" -R "$repo" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "$existing_db_network_mode" -R "$repo" if [[ "$existing_db_network_mode" == "private" ]]; then - gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "$existing_db_subnet_ids_csv" - gh variable set 
EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "$existing_db_lambda_sg_id" + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "$existing_db_subnet_ids_csv" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "$existing_db_lambda_sg_id" -R "$repo" else - gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" - gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" fi else # Clear existing-host vars for new-RDS mode to avoid stale CI config. - gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" - gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "" - gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "public" - gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" - gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "public" -R "$repo" + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" fi echo "Environment variables updated for '$env_name'." 
fi @@ -427,10 +422,10 @@ configure_github_actions_aws() { SLACK_CLIENT_SECRET_SOURCE="prompt" SLACK_CLIENT_SECRET="$(required_from_env_or_prompt "SLACK_CLIENT_SECRET" "SlackClientSecret" "secret")" fi - gh secret set SLACK_SIGNING_SECRET --env "$env_name" --body "$SLACK_SIGNING_SECRET" - gh secret set SLACK_CLIENT_SECRET --env "$env_name" --body "$SLACK_CLIENT_SECRET" + gh secret set SLACK_SIGNING_SECRET --env "$env_name" --body "$SLACK_SIGNING_SECRET" -R "$repo" + gh secret set SLACK_CLIENT_SECRET --env "$env_name" --body "$SLACK_CLIENT_SECRET" -R "$repo" if [[ "$db_mode" == "2" && -n "$existing_db_admin_password" ]]; then - gh secret set EXISTING_DATABASE_ADMIN_PASSWORD --env "$env_name" --body "$existing_db_admin_password" + gh secret set EXISTING_DATABASE_ADMIN_PASSWORD --env "$env_name" --body "$existing_db_admin_password" -R "$repo" fi echo "Environment secrets updated for '$env_name'." fi diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh index 8b378a4..51f8176 100755 --- a/infra/aws/scripts/print-bootstrap-outputs.sh +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -30,7 +30,7 @@ done <<< "$outputs" echo "" echo "=== Suggested GitHub Actions Variables ===" echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRoleArn"{print $2}')" -echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}')" +echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}') (SAM/CI packaging for sam deploy — not Slack or app media)" echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" echo "" echo "Next: deploy the app stack (sam deploy) and set the remaining GitHub vars/secrets." 
diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 775afb5..180795c 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -398,12 +398,7 @@ configure_github_actions_gcp() { echo "Detected service account: $deploy_sa_email" echo "Detected artifact repo: $artifact_registry_url" echo "Detected service URL: $service_url" - repo="$(gh repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" - if [[ -z "$repo" ]]; then - repo="$(prompt_line "GitHub repository (owner/repo) for environment setup" "")" - else - echo "Detected GitHub repository: $repo" - fi + repo="$(prompt_github_repo_for_actions "$REPO_ROOT")" if ! ensure_gh_authenticated; then echo @@ -423,16 +418,16 @@ configure_github_actions_gcp() { fi if prompt_yn "Set repo variables with gh now (GCP_PROJECT_ID, GCP_REGION, GCP_SERVICE_ACCOUNT, DEPLOY_TARGET=gcp)?" "y"; then - gh variable set GCP_PROJECT_ID --body "$gcp_project_id" - gh variable set GCP_REGION --body "$gcp_region" - [[ -n "$deploy_sa_email" ]] && gh variable set GCP_SERVICE_ACCOUNT --body "$deploy_sa_email" - gh variable set DEPLOY_TARGET --body "gcp" + gh variable set GCP_PROJECT_ID --body "$gcp_project_id" -R "$repo" + gh variable set GCP_REGION --body "$gcp_region" -R "$repo" + [[ -n "$deploy_sa_email" ]] && gh variable set GCP_SERVICE_ACCOUNT --body "$deploy_sa_email" -R "$repo" + gh variable set DEPLOY_TARGET --body "gcp" -R "$repo" echo "GitHub repository variables updated." echo "Remember to set GCP_WORKLOAD_IDENTITY_PROVIDER." fi if prompt_yn "Set environment variable STAGE_NAME for '$env_name' now?" "y"; then - gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" -R "$repo" echo "Environment variable STAGE_NAME updated for '$env_name'." 
fi } From 3f2da58bb203b0cfcb63b062eab672bfe604d22e Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Mon, 23 Mar 2026 23:13:26 -0500 Subject: [PATCH 22/45] Bump cryptography version. --- infra/aws/db_setup/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt index 297cdf7..a1c6e6f 100644 --- a/infra/aws/db_setup/requirements.txt +++ b/infra/aws/db_setup/requirements.txt @@ -1,4 +1,4 @@ pymysql==1.1.2 psycopg2-binary==2.9.11 # Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (pip-audit / sam build). -cryptography==44.0.3 +cryptography==46.0.5 From 862fb86d75d5036c0ba303db26fd199ff186a528 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Tue, 24 Mar 2026 08:45:07 -0500 Subject: [PATCH 23/45] Fix issue with GitHub and AWS bootstrap permissions. --- deploy.sh | 2 +- docs/DEPLOYMENT.md | 2 +- infra/aws/scripts/deploy.sh | 177 +++++++++++++++++++++++++----- infra/aws/template.bootstrap.yaml | 5 + 4 files changed, 156 insertions(+), 30 deletions(-) diff --git a/deploy.sh b/deploy.sh index 4a9a120..f0f8e80 100755 --- a/deploy.sh +++ b/deploy.sh @@ -217,7 +217,7 @@ prompt_log_level() { for i in 1 2 3 4 5; do name="$(menu_index_to_log_level "$i")" suf="" - [[ "$i" == "$default_idx" ]] && suf=" (default)" + [[ "$i" == "$default_idx" ]] && suf=" (default/current)" echo " $i) $name$suf" >&2 done diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 1a062a1..7689ef8 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -45,7 +45,7 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 1. **Prerequisites** — Verifies `aws`, `sam`, `docker`, `python3`, `curl` are on `PATH` (with install hints). Prints a status matrix; if optional `gh` is missing, shows install hints and asks whether to continue. Prints Slack app / API token / manifest API links. 2. 
**AWS auth** — Checks credentials; suggests `aws login`, SSO, or `aws configure` as appropriate. -3. **Bootstrap** — Reads or deploys the CloudFormation bootstrap stack (`infra/aws/template.bootstrap.yaml`): OIDC deploy role, S3 artifact bucket, etc. +3. **Bootstrap** — Reads or deploys the CloudFormation bootstrap stack (`infra/aws/template.bootstrap.yaml`): OIDC deploy role, S3 artifact bucket, etc. When bootstrap already exists, the script also syncs it from the checked-in template (with `--no-fail-on-empty-changeset`) so IAM policy updates are applied automatically; set `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` to skip. 4. **App stack** — Prompts for stage (`test`/`prod`) and stack name; **database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). Then **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. After that: optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), and a **deploy summary** before you proceed to the build. 5. **Deploy artifacts** — `sam build -t infra/aws/template.yaml --use-container` then `sam deploy` with assembled parameters (including optional token/app-secret overrides for recovery). 6. **Post-deploy** — Prints stack outputs, can generate `slack-manifest_.json`, optional Slack API configure, **backup summary** of secrets, optional **`gh`** setup for GitHub environments/variables/secrets, and a local **deploy receipt** under `deploy-receipts/` (gitignored). 
diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index ab776e9..8192933 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -320,32 +320,34 @@ output_value() { configure_github_actions_aws() { # $1 Bootstrap stack outputs (tab-separated OutputKey / OutputValue) - # $2 AWS region for this deploy session (fallback if bootstrap has no BootstrapRegion output) - # $3 App CloudFormation stack name - # $4 Stage name (test|prod) — GitHub environment name - # $5 Database schema name - # $6 DB source mode: 1 = stack-managed RDS, 2 = external existing host (matches SAM / prompts) - # $7 Existing DB host (mode 2) - # $8 Existing DB admin user (mode 2) - # $9 Existing DB admin password (mode 2) - # $10 Existing DB network mode: public | private - # $11 Comma-separated subnet IDs for Lambda in private mode - # $12 Lambda ENI security group id in private mode - # $13 Database engine: mysql | postgresql + # $2 Bootstrap CloudFormation stack name (for OIDC drift check vs gh repo) + # $3 AWS region for this deploy session (fallback if bootstrap has no BootstrapRegion output) + # $4 App CloudFormation stack name + # $5 Stage name (test|prod) — GitHub environment name + # $6 Database schema name + # $7 DB source mode: 1 = stack-managed RDS, 2 = external or existing host (matches SAM / prompts) + # $8 Existing DB host (mode 2) + # $9 Existing DB admin user (mode 2) + # $10 Existing DB admin password (mode 2) + # $11 Existing DB network mode: public | private + # $12 Comma-separated subnet IDs for Lambda in private mode + # $13 Lambda ENI security group id in private mode + # $14 Database engine: mysql | postgresql local bootstrap_outputs="$1" - local aws_region="$2" - local app_stack_name="$3" - local deploy_stage="$4" - local database_schema="$5" - local db_mode="$6" - local existing_db_host="$7" - local existing_db_admin_user="$8" - local existing_db_admin_password="$9" - local existing_db_network_mode="${10:-}" + local 
bootstrap_stack_name="$2" + local aws_region="$3" + local app_stack_name="$4" + local deploy_stage="$5" + local database_schema="$6" + local db_mode="$7" + local existing_db_host="$8" + local existing_db_admin_user="$9" + local existing_db_admin_password="${10}" + local existing_db_network_mode="${11:-}" [[ -z "$existing_db_network_mode" ]] && existing_db_network_mode="public" - local existing_db_subnet_ids_csv="${11:-}" - local existing_db_lambda_sg_id="${12:-}" - local database_engine="${13:-}" + local existing_db_subnet_ids_csv="${12:-}" + local existing_db_lambda_sg_id="${13:-}" + local database_engine="${14:-}" [[ -z "$database_engine" ]] && database_engine="mysql" local role bucket boot_region role="$(output_value "$bootstrap_outputs" "GitHubDeployRoleArn")" @@ -361,6 +363,7 @@ configure_github_actions_aws() { echo "Detected deploy bucket: $bucket (SAM/CI packaging for sam deploy — not Slack or app media)" echo "Detected bootstrap region: $boot_region" repo="$(prompt_github_repo_for_actions "$REPO_ROOT")" + maybe_prompt_bootstrap_github_trust_update "$repo" "$bootstrap_stack_name" "$aws_region" if ! ensure_gh_authenticated; then echo @@ -922,6 +925,117 @@ stack_param_value() { echo "$params" | awk -F'\t' -v k="$key" '$1==k {print $2}' } +# Keep bootstrap stack aligned with the checked-in template so IAM/policy fixes +# (for example CloudFormation changeset permissions) apply before app deploy. +# Set SYNCBOT_SKIP_BOOTSTRAP_SYNC=1 to opt out. +sync_bootstrap_stack_from_repo() { + local bootstrap_stack="$1" + local aws_region="$2" + local params github_repo create_oidc bucket_prefix + + if [[ "${SYNCBOT_SKIP_BOOTSTRAP_SYNC:-}" == "1" ]]; then + echo "Skipping bootstrap template sync (SYNCBOT_SKIP_BOOTSTRAP_SYNC=1)." + return 0 + fi + + params="$(stack_parameters "$bootstrap_stack" "$aws_region")" + if [[ -z "$params" ]]; then + echo "Could not read bootstrap stack parameters for '$bootstrap_stack' in $aws_region; skipping bootstrap template sync." 
>&2 + return 0 + fi + + github_repo="$(stack_param_value "$params" "GitHubRepository")" + github_repo="${github_repo//$'\r'/}" + github_repo="${github_repo#"${github_repo%%[![:space:]]*}"}" + github_repo="${github_repo%"${github_repo##*[![:space:]]}"}" + if [[ -z "$github_repo" ]]; then + echo "Bootstrap stack has no GitHubRepository parameter; skipping bootstrap template sync." >&2 + return 0 + fi + + create_oidc="$(stack_param_value "$params" "CreateOIDCProvider")" + bucket_prefix="$(stack_param_value "$params" "DeploymentBucketPrefix")" + [[ -z "$create_oidc" ]] && create_oidc="true" + [[ -z "$bucket_prefix" ]] && bucket_prefix="syncbot-deploy" + + echo + echo "Syncing bootstrap stack with repo template..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$bootstrap_stack" \ + --parameter-overrides \ + "GitHubRepository=$github_repo" \ + "CreateOIDCProvider=$create_oidc" \ + "DeploymentBucketPrefix=$bucket_prefix" \ + --capabilities CAPABILITY_NAMED_IAM \ + --no-fail-on-empty-changeset \ + --region "$aws_region" +} + +# Compare GitHub owner/repo from bootstrap stack to the repo chosen for gh; offer to update OIDC trust. +maybe_prompt_bootstrap_github_trust_update() { + local picked_repo="$1" + local bootstrap_stack="$2" + local aws_region="$3" + local params trusted picked_lc trusted_lc create_oidc bucket_prefix + + if [[ -z "$bootstrap_stack" || -z "$picked_repo" ]]; then + return 0 + fi + + params="$(stack_parameters "$bootstrap_stack" "$aws_region")" + if [[ -z "$params" ]]; then + echo "Could not read bootstrap stack parameters for '$bootstrap_stack' in $aws_region; skipping OIDC trust drift check." >&2 + return 0 + fi + + trusted="$(stack_param_value "$params" "GitHubRepository")" + # CloudFormation / CLI sometimes surface trailing whitespace; normalize for compare + display. 
+ trusted="${trusted//$'\r'/}" + trusted="${trusted#"${trusted%%[![:space:]]*}"}" + trusted="${trusted%"${trusted##*[![:space:]]}"}" + if [[ -z "$trusted" ]]; then + echo "Bootstrap stack has no GitHubRepository parameter; skipping OIDC trust drift check." >&2 + return 0 + fi + + picked_lc="$(printf '%s' "$picked_repo" | tr '[:upper:]' '[:lower:]')" + trusted_lc="$(printf '%s' "$trusted" | tr '[:upper:]' '[:lower:]')" + if [[ "$picked_lc" == "$trusted_lc" ]]; then + echo "Bootstrap OIDC: stack '$bootstrap_stack' has GitHubRepository=$trusted — matches your choice; no bootstrap update needed." + return 0 + fi + + echo + echo "Warning: Bootstrap stack '$bootstrap_stack' OIDC trust is scoped to:" + echo " GitHubRepository=$trusted" + echo "You chose this repository for GitHub Actions variables:" + echo " $picked_repo" + echo "GitHub Actions in '$picked_repo' cannot assume the deploy role until trust matches." + echo + if ! prompt_yes_no "Update bootstrap OIDC trust to '$picked_repo'? (CloudFormation stack update)" "n"; then + echo "Leaving bootstrap GitHubRepository unchanged. Fix manually or update the bootstrap stack later." >&2 + return 0 + fi + + create_oidc="$(stack_param_value "$params" "CreateOIDCProvider")" + bucket_prefix="$(stack_param_value "$params" "DeploymentBucketPrefix")" + [[ -z "$create_oidc" ]] && create_oidc="true" + [[ -z "$bucket_prefix" ]] && bucket_prefix="syncbot-deploy" + + echo "Updating bootstrap stack '$bootstrap_stack'..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$bootstrap_stack" \ + --parameter-overrides \ + "GitHubRepository=$picked_repo" \ + "CreateOIDCProvider=$create_oidc" \ + "DeploymentBucketPrefix=$bucket_prefix" \ + --capabilities CAPABILITY_NAMED_IAM \ + --region "$aws_region" + echo "Bootstrap OIDC trust updated to $picked_repo." 
+} + print_recent_stack_failures() { local stack_name="$1" local region="$2" @@ -1017,6 +1131,11 @@ if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then fi fi +if [[ -n "$BOOTSTRAP_OUTPUTS" ]]; then + sync_bootstrap_stack_from_repo "$BOOTSTRAP_STACK" "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" +fi + S3_BUCKET="$(output_value "$BOOTSTRAP_OUTPUTS" "DeploymentBucketName")" if [[ -n "$S3_BUCKET" ]]; then echo "Detected deploy bucket from bootstrap: $S3_BUCKET" @@ -1095,6 +1214,7 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then [[ -z "$PREV_DATABASE_ENGINE" ]] && PREV_DATABASE_ENGINE="mysql" configure_github_actions_aws \ "$BOOTSTRAP_OUTPUTS" \ + "$BOOTSTRAP_STACK" \ "$REGION" \ "$STACK_NAME" \ "$STAGE" \ @@ -1114,7 +1234,7 @@ fi echo echo "=== Database Source ===" -# DB_MODE / GH_DB_MODE: 1 = stack-managed RDS in this template; 2 = external existing RDS host. +# DB_MODE / GH_DB_MODE: 1 = stack-managed RDS in this template; 2 = external or existing RDS host. 
DB_MODE_DEFAULT="1" if [[ "$IS_STACK_UPDATE" == "true" ]]; then if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then @@ -1122,15 +1242,15 @@ if [[ "$IS_STACK_UPDATE" == "true" ]]; then [[ -z "$EXISTING_DB_LABEL" ]] && EXISTING_DB_LABEL="not set" DB_MODE_DEFAULT="2" echo " 1) Use stack-managed RDS" - echo " 2) Use external existing RDS host: $EXISTING_DB_LABEL (default)" + echo " 2) Use external or existing RDS host: $EXISTING_DB_LABEL (default)" else DB_MODE_DEFAULT="1" echo " 1) Use stack-managed RDS (default)" - echo " 2) Use external existing RDS host" + echo " 2) Use external or existing RDS host" fi else echo " 1) Use stack-managed RDS (default)" - echo " 2) Use external existing RDS host" + echo " 2) Use external or existing RDS host" fi DB_MODE="$(prompt_default "Choose database source (1 or 2)" "$DB_MODE_DEFAULT")" if [[ "$DB_MODE" != "1" && "$DB_MODE" != "2" ]]; then @@ -1538,6 +1658,7 @@ fi if prompt_yes_no "Set up GitHub Actions configuration now?" "n"; then configure_github_actions_aws \ "$BOOTSTRAP_OUTPUTS" \ + "$BOOTSTRAP_STACK" \ "$REGION" \ "$STACK_NAME" \ "$STAGE" \ diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml index 8008668..24a46e5 100644 --- a/infra/aws/template.bootstrap.yaml +++ b/infra/aws/template.bootstrap.yaml @@ -108,6 +108,11 @@ Resources: - cloudformation:CreateStack - cloudformation:UpdateStack - cloudformation:DeleteStack + - cloudformation:CreateChangeSet + - cloudformation:ExecuteChangeSet + - cloudformation:DeleteChangeSet + - cloudformation:DescribeChangeSet + - cloudformation:ListChangeSets - cloudformation:DescribeStacks - cloudformation:DescribeStackEvents - cloudformation:DescribeStackResources From b3450c21c43196d8d370c824ec2716bc87da21ed Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Tue, 24 Mar 2026 20:57:36 -0500 Subject: [PATCH 24/45] Fixes to secrets handling. 
--- .github/workflows/deploy-gcp.yml | 17 ++-- docs/DEPLOYMENT.md | 6 +- infra/aws/scripts/deploy.sh | 118 ++++++++++++++++++------- infra/aws/template.bootstrap.yaml | 12 +++ infra/gcp/main.tf | 4 +- infra/gcp/scripts/deploy.sh | 139 ++++++++++++++++++++++++++++-- infra/gcp/variables.tf | 5 ++ 7 files changed, 252 insertions(+), 49 deletions(-) diff --git a/.github/workflows/deploy-gcp.yml b/.github/workflows/deploy-gcp.yml index 03e43f8..0415265 100644 --- a/.github/workflows/deploy-gcp.yml +++ b/.github/workflows/deploy-gcp.yml @@ -5,7 +5,7 @@ # 1. Run infra/gcp Terraform and configure Workload Identity Federation for GitHub. # 2. Set GitHub vars: GCP_PROJECT_ID, GCP_REGION, GCP_WORKLOAD_IDENTITY_PROVIDER, GCP_SERVICE_ACCOUNT. # 3. Set GitHub secrets for Slack/DB/encryption as needed for your CI (or use Secret Manager only). -# 4. Replace placeholder steps with real build/deploy; disable deploy-aws.yml if using GCP only. +# 4. This workflow intentionally fails until real build/deploy steps are configured. name: Deploy (GCP) @@ -39,8 +39,11 @@ jobs: # - run: | # gcloud builds submit --tag "${{ vars.GCP_REGION }}-docker.pkg.dev/${{ vars.GCP_PROJECT_ID }}/syncbot-TEST-images/syncbot:${{ github.sha }}" . - - name: Placeholder (GCP deploy not yet configured) - run: echo "Configure Workload Identity Federation and uncomment build/deploy steps in deploy-gcp.yml" + - name: Fail until GCP CI is configured + run: | + echo "GCP CI deploy is not configured in this repository." + echo "Implement WIF auth, image build/push, and deploy steps in deploy-gcp.yml before enabling DEPLOY_TARGET=gcp." + exit 1 deploy-test: if: github.ref == 'refs/heads/test' @@ -53,7 +56,9 @@ jobs: # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} # - run: gcloud run deploy syncbot-test --image=... --region=... 
- - run: echo "GCP deploy placeholder" + - run: | + echo "deploy-test is blocked because GCP CI deploy is not configured." + exit 1 deploy-prod: if: github.ref == 'refs/heads/prod' @@ -66,4 +71,6 @@ jobs: # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} # - run: gcloud run deploy syncbot-prod --image=... --region=... - - run: echo "GCP deploy placeholder" + - run: | + echo "deploy-prod is blocked because GCP CI deploy is not configured." + exit 1 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 7689ef8..bc14044 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -57,8 +57,8 @@ You can **skip infra** on an existing stack and jump to GitHub-only setup when p Runs from repo root (or `./deploy.sh` → **gcp**). It: 1. Verifies **Terraform**, **gcloud**, **python3**, **curl**; optional **gh** handling (same as AWS). -2. Guides **auth** (`gcloud auth application-default login` / quota project as needed). -3. **Terraform** — `init` / `plan` / `apply` in `infra/gcp` with prompts for project, stage, image, DB mode, Slack secrets, etc.; can detect existing Cloud Run / Cloud SQL for defaults. +2. Guides **auth** (`gcloud auth login` plus `gcloud auth application-default login`; quota project as needed). +3. **Terraform** — `init` / `plan` / `apply` in `infra/gcp` with prompts for project, stage, image, DB mode, Slack secrets, etc.; can detect existing Cloud Run / Cloud SQL for defaults. `cloud_run_image` is required (no placeholder image fallback). 4. **Post-deploy** — Manifest generation, optional Slack API, deploy receipt, optional **`gh`** for GitHub. See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and outputs. 
@@ -219,7 +219,7 @@ Set Secret Manager values for Slack/DB as in [infra/gcp/README.md](../infra/gcp/ The interactive `infra/gcp/scripts/deploy.sh` uses the same GitHub `owner/repo` selection as the AWS script (based on git remotes when fork and upstream differ). -**Note:** `.github/workflows/deploy-gcp.yml` may still contain **placeholder** steps in upstream; replace with real **build + push + Cloud Run deploy** (or `terraform apply` with a new image tag) in your fork. The guided `infra/gcp/scripts/deploy.sh` is the source of truth for an interactive path. +**Note:** `.github/workflows/deploy-gcp.yml` is intentionally configured to fail until real CI steps are implemented (WIF auth, image build/push, deploy). Keep using `infra/gcp/scripts/deploy.sh` for interactive deploys until CI is fully wired. ### 3. Ongoing deploys diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 8192933..79fa5f0 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -29,9 +29,7 @@ SLACK_CLIENT_SECRET_SOURCE="" EXISTING_DB_ADMIN_PASSWORD_SOURCE="" # Populated before write_deploy_receipt: backup summary + markdown receipt (deploy-receipts/*.md). 
RECEIPT_TOKEN_SECRET_ID="" -RECEIPT_TOKEN_SECRET_VALUE="" RECEIPT_APP_DB_SECRET_NAME="" -RECEIPT_APP_DB_SECRET_VALUE="" # shellcheck source=/dev/null source "$REPO_ROOT/deploy.sh" @@ -655,15 +653,69 @@ write_deploy_receipt() { - Token secret id: ${RECEIPT_TOKEN_SECRET_ID:-n/a} - App DB secret name: ${RECEIPT_APP_DB_SECRET_NAME:-n/a} - Reused app DB password from existing secret: ${APP_DB_PASSWORD_REUSED_FROM_SECRET:-no} - -## Secret Values (backup with care) -- TOKEN_ENCRYPTION_KEY: ${RECEIPT_TOKEN_SECRET_VALUE:-n/a} -- APP_DB_PASSWORD: ${RECEIPT_APP_DB_SECRET_VALUE:-n/a} EOF echo "Deploy receipt written: $receipt_path" } +preflight_secrets_manager_access() { + local region="$1" + local token_secret_id="$2" + local app_db_secret_name="$3" + local existing_token_secret_arn="${4:-}" + local current_secret_id describe_out get_out + + echo + echo "=== Secrets Manager Access Preflight ===" + echo "Verifying deploy principal can read required SyncBot secrets before SAM deploy..." + + # Validate current principal can read both known secret IDs that this deploy path may use. + for current_secret_id in "$token_secret_id" "$app_db_secret_name"; do + if [[ -z "$current_secret_id" ]]; then + continue + fi + + describe_out="$(aws secretsmanager describe-secret \ + --secret-id "$current_secret_id" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>&1 || true)" + if [[ "$describe_out" == *"AccessDenied"* || "$describe_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing DescribeSecret on '$current_secret_id'." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." 
>&2 + exit 1 + fi + + get_out="$(aws secretsmanager get-secret-value \ + --secret-id "$current_secret_id" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>&1 || true)" + if [[ "$get_out" == *"AccessDenied"* || "$get_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing GetSecretValue on '$current_secret_id'." >&2 + echo "This commonly breaks CloudFormation when Lambda environment uses dynamic secret references." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." >&2 + exit 1 + fi + done + + # If explicitly reusing an ARN, validate direct access too. + if [[ -n "$existing_token_secret_arn" ]]; then + get_out="$(aws secretsmanager get-secret-value \ + --secret-id "$existing_token_secret_arn" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>&1 || true)" + if [[ "$get_out" == *"AccessDenied"* || "$get_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing GetSecretValue on '$existing_token_secret_arn'." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." >&2 + exit 1 + fi + fi + + echo "Secrets Manager preflight passed." +} + rds_lookup_network_defaults() { local db_host="$1" local region="$2" @@ -1534,6 +1586,8 @@ if ! prompt_yes_no "Proceed with build + deploy?" "y"; then exit 0 fi +preflight_secrets_manager_access "$REGION" "$TOKEN_SECRET_NAME" "$APP_DB_SECRET_NAME" "$EXISTING_TOKEN_SECRET_ARN" + handle_orphan_app_db_secret_on_create "$EXISTING_STACK_STATUS" "$APP_DB_SECRET_NAME" "$REGION" handle_unhealthy_stack_state "$STACK_NAME" "$REGION" @@ -1614,46 +1668,24 @@ if [[ -n "$SLACK_MANIFEST_GENERATED_PATH" ]]; then slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" fi fi -echo -echo "=== Backup Secrets Summary ===" + +# Prepare secret metadata/value so receipt and final backup output stay in sync. 
if [[ -n "$TOKEN_OVERRIDE" ]]; then RECEIPT_TOKEN_SECRET_ID="TokenEncryptionKeyOverride" - RECEIPT_TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" - echo "- TOKEN_ENCRYPTION_KEY: supplied via TokenEncryptionKeyOverride during deploy." - echo " Ensure this key is backed up where you store DR secrets." + TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" else - # Display path: name or ARN for console; RECEIPT_* copies the same id/value into write_deploy_receipt. TOKEN_SECRET_ID="$TOKEN_SECRET_NAME" if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then TOKEN_SECRET_ID="$EXISTING_TOKEN_SECRET_ARN" fi TOKEN_SECRET_VALUE="$(secret_value_by_id "$TOKEN_SECRET_ID" "$REGION")" RECEIPT_TOKEN_SECRET_ID="$TOKEN_SECRET_ID" - if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then - RECEIPT_TOKEN_SECRET_VALUE="$TOKEN_SECRET_VALUE" - fi - echo "- TOKEN_ENCRYPTION_KEY secret: $TOKEN_SECRET_ID" - if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then - echo " TOKEN_ENCRYPTION_KEY value:" - echo " $TOKEN_SECRET_VALUE" - else - echo " (Could not read secret value automatically. Check IAM permissions.)" - fi fi APP_DB_SECRET_VALUE="$(secret_value_by_id "$APP_DB_SECRET_NAME" "$REGION")" -# RECEIPT_APP_DB_* mirror console output for the markdown receipt from write_deploy_receipt. +# RECEIPT_APP_DB_* mirror the deploy artifacts. RECEIPT_APP_DB_SECRET_NAME="$APP_DB_SECRET_NAME" -if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then - RECEIPT_APP_DB_SECRET_VALUE="$APP_DB_SECRET_VALUE" -fi -echo "- App DB password secret: $APP_DB_SECRET_NAME" -if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then - echo " App DB password value:" - echo " $APP_DB_SECRET_VALUE" -else - echo " (Could not read secret value automatically. Check IAM permissions.)" -fi if prompt_yes_no "Set up GitHub Actions configuration now?" 
"n"; then configure_github_actions_aws \ @@ -1681,3 +1713,23 @@ write_deploy_receipt \ "$SYNCBOT_API_URL" \ "$SYNCBOT_INSTALL_URL" \ "$SLACK_MANIFEST_GENERATED_PATH" + +echo +echo "=== Backup Secrets (Disaster Recovery) ===" +# IMPORTANT: This deploy script must always print plaintext backup secrets at the end. +# Do not remove/redact this section; operators rely on it for DR copy-out immediately after deploy. +echo "Copy these values now and store them in your secure disaster-recovery vault." + +echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_ID" +if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" +else + echo " TOKEN_ENCRYPTION_KEY: " +fi + +echo "- DATABASE_PASSWORD source: $APP_DB_SECRET_NAME" +if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then + echo " DATABASE_PASSWORD: $APP_DB_SECRET_VALUE" +else + echo " DATABASE_PASSWORD: " +fi diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml index 24a46e5..5c19c82 100644 --- a/infra/aws/template.bootstrap.yaml +++ b/infra/aws/template.bootstrap.yaml @@ -242,6 +242,18 @@ Resources: - events:DescribeRule - events:ListTargetsByRule Resource: "*" + - Sid: SecretsManagerSyncBot + Effect: Allow + Action: + - secretsmanager:GetSecretValue + - secretsmanager:DescribeSecret + - secretsmanager:CreateSecret + - secretsmanager:UpdateSecret + - secretsmanager:DeleteSecret + - secretsmanager:TagResource + - secretsmanager:UntagResource + - secretsmanager:ListSecretVersionIds + Resource: !Sub "arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:syncbot-*" Outputs: GitHubDeployRoleArn: diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 963a92d..0ec9316 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -45,8 +45,6 @@ locals { ) db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" db_user = var.use_existing_database ? 
var.existing_db_user : "syncbot_app" - # Image: variable or placeholder until first image push - cloud_run_image_effective = var.cloud_run_image != "" ? var.cloud_run_image : "us-docker.pkg.dev/cloudrun/container/hello" } # --------------------------------------------------------------------------- @@ -250,7 +248,7 @@ resource "google_cloud_run_v2_service" "syncbot" { } containers { - image = local.cloud_run_image_effective + image = var.cloud_run_image resources { limits = { diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 180795c..04b37ce 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -115,6 +115,22 @@ ensure_gcloud_authenticated() { fi } +ensure_gcloud_adc_authenticated() { + if gcloud auth application-default print-access-token >/dev/null 2>&1; then + return 0 + fi + + echo "Application Default Credentials (ADC) are not configured." + if prompt_yn "Run 'gcloud auth application-default login' now?" "y"; then + gcloud auth application-default login || true + fi + + if ! gcloud auth application-default print-access-token >/dev/null 2>&1; then + echo "Unable to configure ADC. Run 'gcloud auth application-default login' and rerun." >&2 + exit 1 + fi +} + ensure_gh_authenticated() { if ! 
command -v gh >/dev/null 2>&1; then prereqs_hint_gh_cli >&2 @@ -181,6 +197,77 @@ cloud_run_image_value() { --format='value(spec.template.spec.containers[0].image)' 2>/dev/null || true } +secret_has_active_version() { + local project_id="$1" + local secret_name="$2" + local latest_state + latest_state="$(gcloud secrets versions describe latest \ + --project "$project_id" \ + --secret "$secret_name" \ + --format='value(state)' 2>/dev/null || true)" + [[ "$latest_state" == "ENABLED" ]] +} + +secret_latest_value() { + local project_id="$1" + local secret_name="$2" + gcloud secrets versions access latest \ + --project "$project_id" \ + --secret "$secret_name" 2>/dev/null || true +} + +cloud_run_secret_name() { + local project_id="$1" + local region="$2" + local service_name="$3" + local env_key="$4" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format=json 2>/dev/null | python3 - "$env_key" <<'PY' +import json +import sys + +env_key = sys.argv[1] +try: + data = json.load(sys.stdin) +except Exception: + print("") + raise SystemExit(0) + +containers = (data.get("spec", {}) or {}).get("template", {}).get("spec", {}).get("containers", []) +for c in containers: + for e in c.get("env", []) or []: + if e.get("name") != env_key: + continue + secret_ref = (((e.get("valueSource") or {}).get("secretKeyRef") or {}).get("secret")) or "" + if not secret_ref: + print("") + raise SystemExit(0) + # Accept either full resource names or plain secret IDs. + print(secret_ref.split("/secrets/")[-1]) + raise SystemExit(0) +print("") +PY +} + +preflight_existing_db_secret_readiness() { + local project_id="$1" + local stage="$2" + local db_secret_name="syncbot-${stage}-syncbot-db-password" + + echo + echo "=== Existing DB Secret Preflight ===" + echo "Verifying required Secret Manager value exists for DATABASE_PASSWORD..." + if ! 
secret_has_active_version "$project_id" "$db_secret_name"; then + echo "Missing active secret version for '$db_secret_name'." >&2 + echo "Create one before deploy, for example:" >&2 + echo " printf '%s' '' | gcloud secrets versions add '$db_secret_name' --project '$project_id' --data-file=-" >&2 + exit 1 + fi + echo "Secret preflight passed for: $db_secret_name" +} + slack_manifest_json_compact() { local manifest_file="$1" python3 - "$manifest_file" <<'PY' @@ -445,6 +532,7 @@ fi REGION="$(prompt_line "GCP region" "${GCP_REGION:-us-central1}")" ensure_gcloud_authenticated +ensure_gcloud_adc_authenticated gcloud config set project "$PROJECT_ID" >/dev/null 2>&1 || true STAGE="$(prompt_line "Stage (test/prod)" "${STAGE:-test}")" if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then @@ -513,7 +601,11 @@ DETECTED_CLOUD_IMAGE="" if [[ -n "$EXISTING_SERVICE_URL" ]]; then DETECTED_CLOUD_IMAGE="$(cloud_run_image_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME")" fi -CLOUD_IMAGE="$(prompt_line "cloud_run_image (blank = placeholder until first build)" "$DETECTED_CLOUD_IMAGE")" +CLOUD_IMAGE="$(prompt_line "cloud_run_image (required)" "$DETECTED_CLOUD_IMAGE")" +if [[ -z "$CLOUD_IMAGE" ]]; then + echo "Error: cloud_run_image is required. Build and push the SyncBot image first, then rerun." 
>&2 + exit 1 +fi DETECTED_LOG_LEVEL="" if [[ -n "$EXISTING_SERVICE_URL" ]]; then @@ -545,6 +637,7 @@ VARS=( ) if [[ "$USE_EXISTING" == "true" ]]; then + preflight_existing_db_secret_readiness "$PROJECT_ID" "$STAGE" VARS+=("-var=use_existing_database=true") VARS+=("-var=existing_db_host=$EXISTING_HOST") VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") @@ -553,9 +646,7 @@ else VARS+=("-var=use_existing_database=false") fi -if [[ -n "$CLOUD_IMAGE" ]]; then - VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") -fi +VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") echo echo "Log level: $LOG_LEVEL" @@ -605,10 +696,48 @@ write_deploy_receipt \ echo "Next:" echo " 1) Set Secret Manager values for Slack (see infra/gcp/README.md)." -echo " 2) Build and push container image; update cloud_run_image and re-apply if needed." +echo " 2) Build and push container image; update cloud_run_image and re-apply when image changes." echo " 3) Run: ./infra/gcp/scripts/print-bootstrap-outputs.sh" bash "$SCRIPT_DIR/print-bootstrap-outputs.sh" || true if prompt_yn "Set up GitHub Actions configuration now?" "n"; then configure_github_actions_gcp "$PROJECT_ID" "$REGION" "$GCP_DIR" "$STAGE" fi + +TOKEN_SECRET_NAME="$(terraform output -raw token_encryption_secret_name 2>/dev/null || true)" +TOKEN_SECRET_NAME="${TOKEN_SECRET_NAME##*/secrets/}" +DB_SECRET_NAME="$(cloud_run_secret_name "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PASSWORD")" +TOKEN_SECRET_VALUE="" +DB_SECRET_VALUE="" +if [[ -n "$TOKEN_SECRET_NAME" ]]; then + TOKEN_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$TOKEN_SECRET_NAME")" +fi +if [[ -n "$DB_SECRET_NAME" ]]; then + DB_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$DB_SECRET_NAME")" +fi + +echo +echo "=== Backup Secrets (Disaster Recovery) ===" +# IMPORTANT: This deploy script must always print plaintext backup secrets at the end. +# Do not remove/redact this section; operators rely on it for DR copy-out immediately after deploy. 
+echo "Copy these values now and store them in your secure disaster-recovery vault." +if [[ -n "$TOKEN_SECRET_NAME" ]]; then + echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_NAME" +else + echo "- TOKEN_ENCRYPTION_KEY source: " +fi +if [[ -n "$TOKEN_SECRET_VALUE" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" +else + echo " TOKEN_ENCRYPTION_KEY: " +fi +if [[ -n "$DB_SECRET_NAME" ]]; then + echo "- DATABASE_PASSWORD source: $DB_SECRET_NAME" +else + echo "- DATABASE_PASSWORD source: " +fi +if [[ -n "$DB_SECRET_VALUE" ]]; then + echo " DATABASE_PASSWORD: $DB_SECRET_VALUE" +else + echo " DATABASE_PASSWORD: " +fi diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 1d65f45..755a411 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -56,6 +56,11 @@ variable "cloud_run_image" { type = string default = "" description = "Container image URL for Cloud Run (e.g. gcr.io/PROJECT/syncbot:latest). Set after first build or by CI." + + validation { + condition = trimspace(var.cloud_run_image) != "" + error_message = "cloud_run_image is required. Build/push the SyncBot image and pass -var=cloud_run_image=." + } } variable "cloud_run_cpu" { From 8557968af0a215dcab7e550a70ae4cb8191eee09 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 11:16:19 -0500 Subject: [PATCH 25/45] Added more app config vars to deploy. 
--- .github/workflows/deploy-aws.yml | 12 ++ deploy.sh | 162 ++++++++++++++ docs/DEPLOYMENT.md | 17 +- docs/INFRA_CONTRACT.md | 4 +- infra/aws/scripts/deploy.sh | 349 +++++++++++++++++++------------ infra/aws/template.yaml | 78 +++++++ infra/gcp/main.tf | 49 +++-- infra/gcp/scripts/deploy.sh | 248 +++++++++++++++------- infra/gcp/variables.tf | 84 ++++++++ 9 files changed, 775 insertions(+), 228 deletions(-) diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index 8eb970e..f49e019 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -105,6 +105,12 @@ jobs: ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ + RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ + SoftDeleteRetentionDays=${{ vars.SOFT_DELETE_RETENTION_DAYS || '30' }} \ + SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ + SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ + SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ @@ -160,6 +166,12 @@ jobs: ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ + RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ + SoftDeleteRetentionDays=${{ vars.SOFT_DELETE_RETENTION_DAYS || '30' }} \ + SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ + SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ + SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ 
SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ diff --git a/deploy.sh b/deploy.sh index f0f8e80..a652aff 100755 --- a/deploy.sh +++ b/deploy.sh @@ -9,6 +9,7 @@ # # Prerequisite helpers below are also sourced by infra/*/scripts/deploy.sh: # source "$REPO_ROOT/deploy.sh" +# Also includes prompt_deploy_tasks_aws / prompt_deploy_tasks_gcp for multi-select deploy steps. set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" @@ -234,6 +235,101 @@ prompt_log_level() { done } +# App settings (used by infra/aws and infra/gcp deploy scripts). Hints on stderr; value on stdout. + +prompt_require_admin() { + local default="$1" + echo "Restrict sync configuration to workspace admins and owners only." >&2 + local hint="Y/n" + [[ "$default" == "false" ]] && hint="y/N" + while true; do + local answer + read -r -p "REQUIRE_ADMIN [$hint]: " answer + if [[ -z "$answer" ]]; then + echo "$default" + return 0 + fi + case "$answer" in + [Yy] | yes | YES | true | TRUE) echo "true"; return 0 ;; + [Nn] | no | NO | false | FALSE) echo "false"; return 0 ;; + esac + echo "Enter y or n (current: $default)." >&2 + done +} + +prompt_soft_delete_retention_days() { + local default="$1" + echo "Days to keep soft-deleted workspace data before permanent purge." >&2 + while true; do + local v + read -r -p "SOFT_DELETE_RETENTION_DAYS [$default]: " v + v="${v:-$default}" + if [[ "$v" =~ ^[0-9]+$ ]] && [[ "$v" -gt 0 ]]; then + echo "$v" + return 0 + fi + echo "Enter a positive integer." >&2 + done +} + +prompt_enable_db_reset() { + local default="$1" + echo "WARNING: When set to a Slack Team ID, a \"Reset Database\" button appears on the Home tab." >&2 + echo "Clicking it DROPS and reinitializes the entire database -- all data is permanently destroyed." >&2 + echo "Leave empty (or enter none/disabled) to turn this off." 
>&2 + local disp + if [[ -z "$default" ]]; then + disp="(disabled)" + else + disp="$default" + fi + local v + read -r -p "ENABLE_DB_RESET (Slack Team ID) [$disp]: " v + v="${v:-$default}" + case "$(echo "$v" | tr "[:upper:]" "[:lower:]")" in + "" | none | disabled) echo "" ;; + *) echo "$v" ;; + esac +} + +prompt_federation_enabled() { + local default="$1" + echo "Allow external connections between SyncBot instances (federation)." >&2 + local hint="y/N" + [[ "$default" == "true" ]] && hint="Y/n" + while true; do + local answer + read -r -p "SYNCBOT_FEDERATION_ENABLED [$hint]: " answer + if [[ -z "$answer" ]]; then + echo "$default" + return 0 + fi + case "$answer" in + [Yy] | yes | YES | true | TRUE) echo "true"; return 0 ;; + [Nn] | no | NO | false | FALSE) echo "false"; return 0 ;; + esac + echo "Enter y or n (current: $default)." >&2 + done +} + +prompt_instance_id() { + local default="$1" + echo "Unique UUID for this SyncBot instance (leave empty to auto-generate at runtime)." >&2 + local disp="${default:-(empty)}" + local v + read -r -p "SYNCBOT_INSTANCE_ID [$disp]: " v + echo "${v:-$default}" +} + +prompt_public_url() { + local default="$1" + echo "Public HTTPS base URL for this instance (required for federation)." >&2 + local disp="${default:-(empty)}" + local v + read -r -p "SYNCBOT_PUBLIC_URL [$disp]: " v + echo "${v:-$default}" +} + # Parse owner/repo from a github.com git remote URL (ssh, https, ssh://). Empty if not GitHub. github_owner_repo_from_url() { local url="$1" @@ -384,6 +480,72 @@ prompt_github_repo_for_actions() { done } +# --------------------------------------------------------------------------- +# Deploy task selection (used by infra/aws and infra/gcp deploy scripts). +# Sets global variables named in flag_names to "true" or "false". 
+# --------------------------------------------------------------------------- + +_prompt_deploy_tasks_parsechoices() { + local choices_raw="${1:-}" + shift + local -a flag_names=("$@") + local n="${#flag_names[@]}" + local i name def="" part idx + for name in "${flag_names[@]}"; do + eval "${name}=false" + done + for ((i = 1; i <= n; i++)); do + [[ -n "$def" ]] && def+="," + def+="$i" + done + local choices="${choices_raw// /}" + [[ -z "$choices" ]] && choices="$def" + IFS=',' read -r -a parts <<<"$choices" + for part in "${parts[@]}"; do + part="${part// /}" + [[ -z "$part" ]] && continue + if [[ "$part" =~ ^[0-9]+$ ]]; then + idx="$part" + if [[ "$idx" -ge 1 && "$idx" -le "$n" ]]; then + eval "${flag_names[$((idx - 1))]}=true" + else + echo "Invalid task number: $part (must be 1-$n)" >&2 + exit 1 + fi + else + echo "Invalid task selection: $part (use comma-separated numbers)" >&2 + exit 1 + fi + done +} + +prompt_deploy_tasks_aws() { + echo "=== Deploy Tasks ===" + printf ' 1) %s\n' "Bootstrap - Create/sync bootstrap stack" + printf ' 2) %s\n' "Build/Deploy - SAM build + deploy" + printf ' 3) %s\n' "CI/CD - GitHub Actions configuration" + printf ' 4) %s\n' "Slack API - Configure Slack app via API" + printf ' 5) %s\n' "Backup Secrets - Print DR backup secrets" + local default_all="1,2,3,4,5" + local choices="" + read -r -e -p "Select tasks (comma-separated) [$default_all]: " choices + choices="${choices:-$default_all}" + _prompt_deploy_tasks_parsechoices "$choices" TASK_BOOTSTRAP TASK_BUILD_DEPLOY TASK_CICD TASK_SLACK_API TASK_BACKUP_SECRETS +} + +prompt_deploy_tasks_gcp() { + echo "=== Deploy Tasks ===" + printf ' 1) %s\n' "Build/Deploy - Terraform plan + apply" + printf ' 2) %s\n' "CI/CD - GitHub Actions configuration" + printf ' 3) %s\n' "Slack API - Configure Slack app via API" + printf ' 4) %s\n' "Backup Secrets - Print DR backup secrets" + local default_all="1,2,3,4" + local choices="" + read -r -e -p "Select tasks (comma-separated) [$default_all]: " 
choices + choices="${choices:-$default_all}" + _prompt_deploy_tasks_parsechoices "$choices" TASK_BUILD_DEPLOY TASK_CICD TASK_SLACK_API TASK_BACKUP_SECRETS +} + # When sourced by infra/*/scripts/deploy.sh, only load helpers above. if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then return 0 diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index bc14044..5014754 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -45,12 +45,11 @@ Runs from repo root (or via `./deploy.sh` → **aws**). It: 1. **Prerequisites** — Verifies `aws`, `sam`, `docker`, `python3`, `curl` are on `PATH` (with install hints). Prints a status matrix; if optional `gh` is missing, shows install hints and asks whether to continue. Prints Slack app / API token / manifest API links. 2. **AWS auth** — Checks credentials; suggests `aws login`, SSO, or `aws configure` as appropriate. -3. **Bootstrap** — Reads or deploys the CloudFormation bootstrap stack (`infra/aws/template.bootstrap.yaml`): OIDC deploy role, S3 artifact bucket, etc. When bootstrap already exists, the script also syncs it from the checked-in template (with `--no-fail-on-empty-changeset`) so IAM policy updates are applied automatically; set `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` to skip. -4. **App stack** — Prompts for stage (`test`/`prod`) and stack name; **database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). Then **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. 
After that: optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), and a **deploy summary** before you proceed to the build. -5. **Deploy artifacts** — `sam build -t infra/aws/template.yaml --use-container` then `sam deploy` with assembled parameters (including optional token/app-secret overrides for recovery). -6. **Post-deploy** — Prints stack outputs, can generate `slack-manifest_.json`, optional Slack API configure, **backup summary** of secrets, optional **`gh`** setup for GitHub environments/variables/secrets, and a local **deploy receipt** under `deploy-receipts/` (gitignored). - -You can **skip infra** on an existing stack and jump to GitHub-only setup when prompted. +3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). +4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. +5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). 
**New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. ### GCP: `infra/gcp/scripts/deploy.sh` @@ -58,8 +57,10 @@ Runs from repo root (or `./deploy.sh` → **gcp**). It: 1. Verifies **Terraform**, **gcloud**, **python3**, **curl**; optional **gh** handling (same as AWS). 2. Guides **auth** (`gcloud auth login` plus `gcloud auth application-default login`; quota project as needed). -3. **Terraform** — `init` / `plan` / `apply` in `infra/gcp` with prompts for project, stage, image, DB mode, Slack secrets, etc.; can detect existing Cloud Run / Cloud SQL for defaults. `cloud_run_image` is required (no placeholder image fallback). -4. **Post-deploy** — Manifest generation, optional Slack API, deploy receipt, optional **`gh`** for GitHub. +3. **Project / stage / existing service** — Prompts for project, region, stage; can detect existing Cloud Run for defaults. +4. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Build/Deploy** (full Terraform flow), **CI/CD**, **Slack API**, **Backup Secrets**. Skipping **Build/Deploy** requires existing Terraform state/outputs for tasks that need them. +5. **Terraform** (if Build/Deploy selected) — Prompts for DB mode, `cloud_run_image` (required), log level, etc.; `terraform init` / `plan` / `apply` in `infra/gcp` (no separate y/n gates on plan/apply). +6. **Post-deploy** — According to selected tasks: manifest, Slack API, deploy receipt, **`gh`**, `print-bootstrap-outputs.sh`, DR backup lines. 
See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and outputs. diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index f820b87..9be77b4 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -2,7 +2,7 @@ This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) must supply so SyncBot runs correctly. Forks can swap provider-specific IaC in `infra/<provider>/` as long as they satisfy this contract. -**Deploy entrypoint:** From the repo root, `./deploy.sh` (macOS/Linux, or Git Bash/WSL bash) or `.\deploy.ps1` (Windows PowerShell — finds Git Bash or WSL, then bash) runs an interactive helper that delegates to `infra/<provider>/scripts/deploy.sh`. That flow sets Cloud/Terraform resources and runtime env vars consistent with this document. Step-by-step and manual alternatives: [DEPLOYMENT.md](DEPLOYMENT.md). +**Deploy entrypoint:** From the repo root, `./deploy.sh` (macOS/Linux, or Git Bash/WSL bash) or `.\deploy.ps1` (Windows PowerShell — finds Git Bash or WSL, then bash) runs an interactive helper that delegates to `infra/<provider>/scripts/deploy.sh`. After identity/auth prompts, each provider script shows a **Deploy Tasks** menu (comma-separated numbers, default all): bootstrap (AWS only), build/deploy, CI/CD (GitHub Actions), Slack API configuration, and DR backup secret output—so operators can run subsets (e.g. CI/CD only against an existing stack) without mid-flow surprises. That flow sets Cloud/Terraform resources and runtime env vars consistent with this document. Step-by-step and manual alternatives: [DEPLOYMENT.md](DEPLOYMENT.md). **Pre-release:** This repo is pre-release. Database rollout assumes **fresh installs only** (no legacy schema migration or stamping). New databases are initialized via Alembic `upgrade head` at startup. @@ -56,7 +56,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | `SLACK_USER_SCOPES` | Comma-separated OAuth **user** scopes.
Must match `oauth_config.scopes.user` and `syncbot/slack_manifest_scopes.py` `USER_SCOPES`. If this env requests scopes that are not declared on the Slack app, install fails with `invalid_scope`. | | `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. | -**Reference wiring:** AWS SAM (`infra/aws/template.yaml`) uses CloudFormation parameters **`SlackOauthBotScopes`** and **`SlackOauthUserScopes`** (defaults match `BOT_SCOPES` / `USER_SCOPES`) to populate **`SLACK_BOT_SCOPES`** and **`SLACK_USER_SCOPES`**, and **`LogLevel`** (default `INFO`) → **`LOG_LEVEL`**. GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`; you set the secret **value** to the same comma-separated bot list) and **`slack_user_scopes`** (plain env → `SLACK_USER_SCOPES`, default matches SAM); **`log_level`** (default `INFO`) sets **`LOG_LEVEL`** on Cloud Run; see [infra/gcp/README.md](../infra/gcp/README.md). +**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`EnableDbReset`** → **`ENABLE_DB_RESET`**; optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). 
**`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. ### Optional diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 79fa5f0..0fa6013 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -7,12 +7,12 @@ # # Phases (main path, after functions are defined below): # 1) Prerequisites: CLI checks, template paths -# 2) Bootstrap: CloudFormation bootstrap stack and S3 artifact bucket (if missing) -# 3) App stack: region, stage, target stack name; detect existing stack for update -# 4) Database: source mode (stack RDS vs external host), engine, schema, existing-DB networking -# 5) Slack: signing secret, client secret, client ID -# 6) Confirm deploy summary, SAM build + deploy -# 7) After deploy: stage manifest, optional Slack API update, optional GitHub vars, deploy receipt +# 2) Authentication: AWS region and credentials +# 3) Bootstrap probe: read bootstrap stack outputs (create/sync runs only if task 1 selected) +# 4) Stack identity: stage, app stack name; detect existing stack for update +# 5) Deploy Tasks: multi-select menu (bootstrap, build/deploy, CI/CD, Slack API, backup secrets) +# 6) Configuration (if build/deploy): 
database, Slack creds, SAM build + deploy +# 7) Post-tasks: Slack manifest/API, GitHub Actions, deploy receipt, DR secret backup set -euo pipefail @@ -1132,6 +1132,7 @@ handle_unhealthy_stack_state() { esac } +echo "=== Prerequisites ===" prereqs_require_cmd aws prereqs_hint_aws_cli prereqs_require_cmd sam prereqs_hint_sam_cli prereqs_require_cmd docker prereqs_hint_docker @@ -1154,46 +1155,13 @@ echo DEFAULT_REGION="${AWS_REGION:-us-east-2}" REGION="$(prompt_default "AWS region" "$DEFAULT_REGION")" +echo +echo "=== Authentication ===" ensure_aws_authenticated BOOTSTRAP_STACK="$(prompt_default "Bootstrap stack name" "syncbot-bootstrap")" +# Probe bootstrap outputs only; create/sync runs later if task 1 (Bootstrap) is selected. BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" -if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then - echo - echo "Bootstrap stack not found (or has no outputs): $BOOTSTRAP_STACK in $REGION" - if prompt_yes_no "Deploy bootstrap stack now?" "y"; then - GITHUB_REPO="$(prompt_default "GitHub repository (owner/repo)" "REPLACE_ME_OWNER/REPLACE_ME_REPO")" - CREATE_OIDC="$(prompt_default "Create OIDC provider (true/false)" "true")" - BUCKET_PREFIX="$(prompt_default "Deployment bucket prefix" "syncbot-deploy")" - echo - echo "=== Bootstrap Stack ===" - echo "Deploying bootstrap stack..." - aws cloudformation deploy \ - --template-file "$BOOTSTRAP_TEMPLATE" \ - --stack-name "$BOOTSTRAP_STACK" \ - --parameter-overrides \ - "GitHubRepository=$GITHUB_REPO" \ - "CreateOIDCProvider=$CREATE_OIDC" \ - "DeploymentBucketPrefix=$BUCKET_PREFIX" \ - --capabilities CAPABILITY_NAMED_IAM \ - --region "$REGION" - BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" - else - echo "Skipping bootstrap. You must provide deploy bucket manually." 
- fi -fi - -if [[ -n "$BOOTSTRAP_OUTPUTS" ]]; then - sync_bootstrap_stack_from_repo "$BOOTSTRAP_STACK" "$REGION" - BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" -fi - -S3_BUCKET="$(output_value "$BOOTSTRAP_OUTPUTS" "DeploymentBucketName")" -if [[ -n "$S3_BUCKET" ]]; then - echo "Detected deploy bucket from bootstrap: $S3_BUCKET" -else - S3_BUCKET="$(prompt_default "Deployment S3 bucket name" "REPLACE_ME_DEPLOY_BUCKET")" -fi SUGGESTED_TEST_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedTestStackName")" SUGGESTED_PROD_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedProdStackName")" @@ -1201,6 +1169,7 @@ SUGGESTED_PROD_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedProdStackNam [[ -z "$SUGGESTED_PROD_STACK" ]] && SUGGESTED_PROD_STACK="syncbot-prod" echo +echo "=== Stack Identity ===" STAGE="$(prompt_default "Deploy stage (test/prod)" "test")" if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then echo "Error: stage must be 'test' or 'prod'." >&2 @@ -1221,8 +1190,17 @@ PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" PREV_DATABASE_ENGINE="" PREV_DATABASE_SCHEMA="" PREV_LOG_LEVEL="" +PREV_REQUIRE_ADMIN="" +PREV_SOFT_DELETE="" +PREV_FEDERATION="" +PREV_INSTANCE_ID="" +PREV_PUBLIC_URL="" +PREV_ENABLE_DB_RESET="" +PREV_DB_TLS="" +PREV_DB_SSL_CA="" PREV_DATABASE_HOST_IN_USE="" PREV_STACK_USES_EXISTING_DB="false" +EXISTING_STACK_OUTPUTS="" if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then echo "Detected existing CloudFormation stack: $STACK_NAME ($EXISTING_STACK_STATUS)" if ! prompt_yes_no "Continue and update this existing stack?" 
"y"; then @@ -1239,6 +1217,14 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" + PREV_REQUIRE_ADMIN="$(stack_param_value "$EXISTING_STACK_PARAMS" "RequireAdmin")" + PREV_SOFT_DELETE="$(stack_param_value "$EXISTING_STACK_PARAMS" "SoftDeleteRetentionDays")" + PREV_FEDERATION="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotFederationEnabled")" + PREV_INSTANCE_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotInstanceId")" + PREV_PUBLIC_URL="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotPublicUrl")" + PREV_ENABLE_DB_RESET="$(stack_param_value "$EXISTING_STACK_PARAMS" "EnableDbReset")" + PREV_DB_TLS="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseTlsEnabled")" + PREV_DB_SSL_CA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSslCaPath")" EXISTING_STACK_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" PREV_DATABASE_HOST_IN_USE="$(output_value "$EXISTING_STACK_OUTPUTS" "DatabaseHostInUse")" if [[ -n "$PREV_EXISTING_DATABASE_HOST" ]]; then @@ -1247,44 +1233,63 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then if [[ -z "$PREV_EXISTING_DATABASE_HOST" && -n "$PREV_DATABASE_HOST_IN_USE" ]]; then PREV_EXISTING_DATABASE_HOST="$PREV_DATABASE_HOST_IN_USE" fi +fi + +echo +prompt_deploy_tasks_aws - if prompt_yes_no "Skip infrastructure re-deploy and go directly to GitHub Actions setup?" "n"; then - # Same semantics as DB_MODE (1 = stack RDS, 2 = existing host) for GitHub env vars only. 
- GH_DB_MODE="1" - if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then - GH_DB_MODE="2" +if [[ "$TASK_BOOTSTRAP" == "true" ]]; then + echo + echo "=== Bootstrap Stack ===" + if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then + echo "Bootstrap stack not found (or has no outputs): $BOOTSTRAP_STACK in $REGION" + if prompt_yes_no "Deploy bootstrap stack now?" "y"; then + GITHUB_REPO="$(prompt_default "GitHub repository (owner/repo)" "REPLACE_ME_OWNER/REPLACE_ME_REPO")" + CREATE_OIDC="$(prompt_default "Create OIDC provider (true/false)" "true")" + BUCKET_PREFIX="$(prompt_default "Deployment bucket prefix" "syncbot-deploy")" + echo "Deploying bootstrap stack..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$BOOTSTRAP_STACK" \ + --parameter-overrides \ + "GitHubRepository=$GITHUB_REPO" \ + "CreateOIDCProvider=$CREATE_OIDC" \ + "DeploymentBucketPrefix=$BUCKET_PREFIX" \ + --capabilities CAPABILITY_NAMED_IAM \ + --region "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + else + echo "Skipping bootstrap. You must provide deploy bucket manually when deploying." fi - GH_DATABASE_SCHEMA="$PREV_DATABASE_SCHEMA" - [[ -z "$GH_DATABASE_SCHEMA" ]] && GH_DATABASE_SCHEMA="syncbot_${STAGE}" + fi + if [[ -n "$BOOTSTRAP_OUTPUTS" ]]; then + sync_bootstrap_stack_from_repo "$BOOTSTRAP_STACK" "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + fi +fi - # Initialize optional globals used only when user opts into setting secrets in GitHub setup. 
- SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" - SLACK_CLIENT_SECRET="${SLACK_CLIENT_SECRET:-}" +BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" +S3_BUCKET="$(output_value "$BOOTSTRAP_OUTPUTS" "DeploymentBucketName")" +if [[ -n "$S3_BUCKET" ]]; then + echo "Detected deploy bucket from bootstrap: $S3_BUCKET" +elif [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + S3_BUCKET="$(prompt_default "Deployment S3 bucket name" "REPLACE_ME_DEPLOY_BUCKET")" +else + S3_BUCKET="" +fi - echo - echo "Skipping deploy. Opening GitHub Actions setup for existing stack..." - [[ -z "$PREV_DATABASE_ENGINE" ]] && PREV_DATABASE_ENGINE="mysql" - configure_github_actions_aws \ - "$BOOTSTRAP_OUTPUTS" \ - "$BOOTSTRAP_STACK" \ - "$REGION" \ - "$STACK_NAME" \ - "$STAGE" \ - "$GH_DATABASE_SCHEMA" \ - "$GH_DB_MODE" \ - "$PREV_EXISTING_DATABASE_HOST" \ - "$PREV_EXISTING_DATABASE_ADMIN_USER" \ - "${EXISTING_DATABASE_ADMIN_PASSWORD:-}" \ - "$PREV_EXISTING_DATABASE_NETWORK_MODE" \ - "$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" \ - "$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" \ - "$PREV_DATABASE_ENGINE" - echo "Done. No infrastructure changes were deployed." - exit 0 +if [[ "$TASK_BUILD_DEPLOY" != "true" ]]; then + if [[ "$TASK_CICD" == "true" || "$TASK_SLACK_API" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + if [[ -z "${EXISTING_STACK_STATUS:-}" || "$EXISTING_STACK_STATUS" == "None" ]]; then + echo "Error: CloudFormation stack '$STACK_NAME' does not exist in $REGION. Select task 2 (Build/Deploy) first or create the stack." >&2 + exit 1 + fi fi fi +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then echo +echo "=== Configuration ===" echo "=== Database Source ===" # DB_MODE / GH_DB_MODE: 1 = stack-managed RDS in this template; 2 = external or existing RDS host. 
DB_MODE_DEFAULT="1" @@ -1294,10 +1299,10 @@ if [[ "$IS_STACK_UPDATE" == "true" ]]; then [[ -z "$EXISTING_DB_LABEL" ]] && EXISTING_DB_LABEL="not set" DB_MODE_DEFAULT="2" echo " 1) Use stack-managed RDS" - echo " 2) Use external or existing RDS host: $EXISTING_DB_LABEL (default)" + echo " 2) Use external or existing RDS host: $EXISTING_DB_LABEL (default/current)" else DB_MODE_DEFAULT="1" - echo " 1) Use stack-managed RDS (default)" + echo " 1) Use stack-managed RDS (default/current)" echo " 2) Use external or existing RDS host" fi else @@ -1328,9 +1333,9 @@ echo echo "=== Database Engine ===" if [[ "$DB_ENGINE_DEFAULT" == "2" ]]; then echo " 1) MySQL" - echo " 2) PostgreSQL (default; detected from current stack)" + echo " 2) PostgreSQL (default/current)" else - echo " 1) MySQL (default)" + echo " 1) MySQL (default/current)" echo " 2) PostgreSQL" fi DB_ENGINE_MODE="$(prompt_default "Choose 1 or 2" "$DB_ENGINE_DEFAULT")" @@ -1540,16 +1545,48 @@ if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_LOG_LEVEL" ]]; then LOG_LEVEL_DEFAULT="$PREV_LOG_LEVEL" fi +REQUIRE_ADMIN="${PREV_REQUIRE_ADMIN:-true}" +SOFT_DELETE_RETENTION_DAYS="${PREV_SOFT_DELETE:-30}" +SYNCBOT_FEDERATION_ENABLED="${PREV_FEDERATION:-false}" +SYNCBOT_INSTANCE_ID="${PREV_INSTANCE_ID:-}" +SYNCBOT_PUBLIC_URL="${PREV_PUBLIC_URL:-}" +ENABLE_DB_RESET="${PREV_ENABLE_DB_RESET:-}" +DATABASE_TLS_ENABLED="${PREV_DB_TLS:-}" +DATABASE_SSL_CA_PATH="${PREV_DB_SSL_CA:-}" + echo echo "=== Log Level ===" LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" +echo +echo "=== App Settings ===" +REQUIRE_ADMIN="$(prompt_require_admin "$REQUIRE_ADMIN")" +SOFT_DELETE_RETENTION_DAYS="$(prompt_soft_delete_retention_days "$SOFT_DELETE_RETENTION_DAYS")" +ENABLE_DB_RESET="$(prompt_enable_db_reset "$ENABLE_DB_RESET")" +SYNCBOT_FEDERATION_ENABLED="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_ENABLED")" +if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then + SYNCBOT_INSTANCE_ID="$(prompt_instance_id "$SYNCBOT_INSTANCE_ID")" + 
SYNCBOT_PUBLIC_URL="$(prompt_public_url "$SYNCBOT_PUBLIC_URL")" +fi + echo echo "=== Deploy Summary ===" echo "Region: $REGION" echo "Stack: $STACK_NAME" echo "Stage: $STAGE" echo "Log level: $LOG_LEVEL" +echo "Require admin: $REQUIRE_ADMIN" +echo "Soft-delete days: $SOFT_DELETE_RETENTION_DAYS" +if [[ -n "$ENABLE_DB_RESET" ]]; then + echo "DB reset (team): $ENABLE_DB_RESET" +else + echo "DB reset (team): (disabled)" +fi +if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then + echo "Federation: enabled" + [[ -n "$SYNCBOT_INSTANCE_ID" ]] && echo "Instance ID: $SYNCBOT_INSTANCE_ID" + [[ -n "$SYNCBOT_PUBLIC_URL" ]] && echo "Public URL: $SYNCBOT_PUBLIC_URL" +fi echo "Deploy bucket: $S3_BUCKET" if [[ "$DB_MODE" == "2" ]]; then echo "DB mode: existing host" @@ -1586,6 +1623,8 @@ if ! prompt_yes_no "Proceed with build + deploy?" "y"; then exit 0 fi +echo +echo "=== Preflight ===" preflight_secrets_manager_access "$REGION" "$TOKEN_SECRET_NAME" "$APP_DB_SECRET_NAME" "$EXISTING_TOKEN_SECRET_ARN" handle_orphan_app_db_secret_on_create "$EXISTING_STACK_STATUS" "$APP_DB_SECRET_NAME" "$REGION" @@ -1604,7 +1643,16 @@ PARAMS=( "SlackClientSecret=$SLACK_CLIENT_SECRET" "DatabaseSchema=$DATABASE_SCHEMA" "LogLevel=$LOG_LEVEL" + "RequireAdmin=$REQUIRE_ADMIN" + "SoftDeleteRetentionDays=$SOFT_DELETE_RETENTION_DAYS" + "SyncbotFederationEnabled=$SYNCBOT_FEDERATION_ENABLED" ) +# SAM rejects Key= (empty value) in shorthand format; only include when non-empty. 
+[[ -n "$SYNCBOT_INSTANCE_ID" ]] && PARAMS+=("SyncbotInstanceId=$SYNCBOT_INSTANCE_ID") +[[ -n "$SYNCBOT_PUBLIC_URL" ]] && PARAMS+=("SyncbotPublicUrl=$SYNCBOT_PUBLIC_URL") +[[ -n "$ENABLE_DB_RESET" ]] && PARAMS+=("EnableDbReset=$ENABLE_DB_RESET") +[[ -n "$DATABASE_TLS_ENABLED" ]] && PARAMS+=("DatabaseTlsEnabled=$DATABASE_TLS_ENABLED") +[[ -n "$DATABASE_SSL_CA_PATH" ]] && PARAMS+=("DatabaseSslCaPath=$DATABASE_SSL_CA_PATH") if [[ -n "$SLACK_CLIENT_ID" ]]; then PARAMS+=("SlackClientID=$SLACK_CLIENT_ID") @@ -1624,14 +1672,15 @@ if [[ "$DB_MODE" == "2" ]]; then ) fi else - # Explicitly clear existing-host parameters on updates to avoid stale previous values. + # Clear existing-host parameters on updates to avoid stale previous values. + # SAM rejects Key= (empty value) in shorthand; use ParameterKey=K,ParameterValue= instead. PARAMS+=( - "ExistingDatabaseHost=" - "ExistingDatabaseAdminUser=" - "ExistingDatabaseAdminPassword=" + "ParameterKey=ExistingDatabaseHost,ParameterValue=" + "ParameterKey=ExistingDatabaseAdminUser,ParameterValue=" + "ParameterKey=ExistingDatabaseAdminPassword,ParameterValue=" "ExistingDatabaseNetworkMode=public" - "ExistingDatabaseSubnetIdsCsv=" - "ExistingDatabaseLambdaSecurityGroupId=" + "ParameterKey=ExistingDatabaseSubnetIdsCsv,ParameterValue=" + "ParameterKey=ExistingDatabaseLambdaSecurityGroupId,ParameterValue=" ) fi @@ -1657,37 +1706,57 @@ sam deploy \ --parameter-overrides "${PARAMS[@]}" APP_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" + +else + echo + echo "Skipping Build/Deploy (task 2 not selected)." 
+ APP_OUTPUTS="${EXISTING_STACK_OUTPUTS:-}" + DB_MODE="1" + if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then + DB_MODE="2" + fi + DATABASE_SCHEMA="${PREV_DATABASE_SCHEMA:-}" + [[ -z "$DATABASE_SCHEMA" ]] && DATABASE_SCHEMA="syncbot_${STAGE}" + DATABASE_ENGINE="${PREV_DATABASE_ENGINE:-mysql}" + [[ -z "$DATABASE_ENGINE" ]] && DATABASE_ENGINE="mysql" + EXISTING_DATABASE_HOST="${PREV_EXISTING_DATABASE_HOST:-}" + EXISTING_DATABASE_ADMIN_USER="${PREV_EXISTING_DATABASE_ADMIN_USER:-}" + EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" + EXISTING_DATABASE_NETWORK_MODE="${PREV_EXISTING_DATABASE_NETWORK_MODE:-public}" + EXISTING_DATABASE_SUBNET_IDS_CSV="${PREV_EXISTING_DATABASE_SUBNET_IDS_CSV:-}" + EXISTING_DATABASE_LAMBDA_SG_ID="${PREV_EXISTING_DATABASE_LAMBDA_SG_ID:-}" + SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" + SLACK_CLIENT_SECRET="${SLACK_CLIENT_SECRET:-}" + SLACK_CLIENT_ID="${SLACK_CLIENT_ID:-}" + TOKEN_SECRET_NAME="syncbot-${STAGE}-token-encryption-key" + APP_DB_SECRET_NAME="syncbot-${STAGE}-app-db-password" + TOKEN_OVERRIDE="" + EXISTING_TOKEN_SECRET_ARN="" + RECEIPT_TOKEN_SECRET_ID="" + RECEIPT_APP_DB_SECRET_NAME="" + TOKEN_SECRET_ID="" + TOKEN_SECRET_VALUE="" + APP_DB_SECRET_VALUE="" +fi + SYNCBOT_API_URL="$(output_value "$APP_OUTPUTS" "SyncBotApiUrl")" SYNCBOT_INSTALL_URL="$(output_value "$APP_OUTPUTS" "SyncBotInstallUrl")" echo -echo "Deploy complete." -generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" -if [[ -n "$SLACK_MANIFEST_GENERATED_PATH" ]]; then - if prompt_yes_no "Configure Slack app via Slack API now (create or update from generated manifest)?" "n"; then - slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" - fi +echo "=== Post-Deploy ===" +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo "Deploy complete." fi -# Prepare secret metadata/value so receipt and final backup output stay in sync. 
-if [[ -n "$TOKEN_OVERRIDE" ]]; then - RECEIPT_TOKEN_SECRET_ID="TokenEncryptionKeyOverride" - TOKEN_SECRET_ID="TokenEncryptionKeyOverride" - TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" -else - TOKEN_SECRET_ID="$TOKEN_SECRET_NAME" - if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then - TOKEN_SECRET_ID="$EXISTING_TOKEN_SECRET_ARN" - fi - TOKEN_SECRET_VALUE="$(secret_value_by_id "$TOKEN_SECRET_ID" "$REGION")" - RECEIPT_TOKEN_SECRET_ID="$TOKEN_SECRET_ID" +if [[ "$TASK_SLACK_API" == "true" || "$TASK_BUILD_DEPLOY" == "true" ]]; then + generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" fi -APP_DB_SECRET_VALUE="$(secret_value_by_id "$APP_DB_SECRET_NAME" "$REGION")" -# RECEIPT_APP_DB_* mirror the deploy artifacts. -RECEIPT_APP_DB_SECRET_NAME="$APP_DB_SECRET_NAME" +if [[ "$TASK_SLACK_API" == "true" ]] && [[ -n "${SLACK_MANIFEST_GENERATED_PATH:-}" ]]; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" +fi -if prompt_yes_no "Set up GitHub Actions configuration now?" "n"; then +if [[ "$TASK_CICD" == "true" ]]; then configure_github_actions_aws \ "$BOOTSTRAP_OUTPUTS" \ "$BOOTSTRAP_STACK" \ @@ -1705,31 +1774,55 @@ if prompt_yes_no "Set up GitHub Actions configuration now?" "n"; then "$DATABASE_ENGINE" fi -write_deploy_receipt \ - "aws" \ - "$STAGE" \ - "$STACK_NAME" \ - "$REGION" \ - "$SYNCBOT_API_URL" \ - "$SYNCBOT_INSTALL_URL" \ - "$SLACK_MANIFEST_GENERATED_PATH" +if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + # Prepare secret metadata/value so receipt and final backup output stay in sync. 
+ if [[ -n "${TOKEN_OVERRIDE:-}" ]]; then + RECEIPT_TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" + else + TOKEN_SECRET_ID="${TOKEN_SECRET_NAME:-}" + if [[ -n "${EXISTING_TOKEN_SECRET_ARN:-}" ]]; then + TOKEN_SECRET_ID="$EXISTING_TOKEN_SECRET_ARN" + fi + TOKEN_SECRET_VALUE="$(secret_value_by_id "$TOKEN_SECRET_ID" "$REGION" 2>/dev/null || true)" + RECEIPT_TOKEN_SECRET_ID="$TOKEN_SECRET_ID" + fi + APP_DB_SECRET_VALUE="$(secret_value_by_id "$APP_DB_SECRET_NAME" "$REGION" 2>/dev/null || true)" + RECEIPT_APP_DB_SECRET_NAME="$APP_DB_SECRET_NAME" +fi -echo -echo "=== Backup Secrets (Disaster Recovery) ===" -# IMPORTANT: This deploy script must always print plaintext backup secrets at the end. -# Do not remove/redact this section; operators rely on it for DR copy-out immediately after deploy. -echo "Copy these values now and store them in your secure disaster-recovery vault." - -echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_ID" -if [[ -n "$TOKEN_SECRET_VALUE" && "$TOKEN_SECRET_VALUE" != "None" ]]; then - echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" -else - echo " TOKEN_ENCRYPTION_KEY: " +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo + echo "=== Deploy Receipt ===" + write_deploy_receipt \ + "aws" \ + "$STAGE" \ + "$STACK_NAME" \ + "$REGION" \ + "$SYNCBOT_API_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" fi -echo "- DATABASE_PASSWORD source: $APP_DB_SECRET_NAME" -if [[ -n "$APP_DB_SECRET_VALUE" && "$APP_DB_SECRET_VALUE" != "None" ]]; then - echo " DATABASE_PASSWORD: $APP_DB_SECRET_VALUE" -else - echo " DATABASE_PASSWORD: " +if [[ "$TASK_BACKUP_SECRETS" == "true" ]]; then + echo + echo "=== Backup Secrets (Disaster Recovery) ===" + # IMPORTANT: When Backup Secrets is selected, print plaintext backup secrets here. + # Do not remove/redact this section; operators rely on it for DR copy-out. 
+ echo "Copy these values now and store them in your secure disaster-recovery vault." + + echo "- TOKEN_ENCRYPTION_KEY source: ${TOKEN_SECRET_ID:-}" + if [[ -n "${TOKEN_SECRET_VALUE:-}" && "$TOKEN_SECRET_VALUE" != "None" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" + else + echo " TOKEN_ENCRYPTION_KEY: " + fi + + echo "- DATABASE_PASSWORD source: ${APP_DB_SECRET_NAME:-}" + if [[ -n "${APP_DB_SECRET_VALUE:-}" && "$APP_DB_SECRET_VALUE" != "None" ]]; then + echo " DATABASE_PASSWORD: $APP_DB_SECRET_VALUE" + else + echo " DATABASE_PASSWORD: " + fi fi diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 73df658..7b1f785 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -213,6 +213,57 @@ Parameters: - ERROR - CRITICAL + SoftDeleteRetentionDays: + Description: Days to retain soft-deleted workspace data (SOFT_DELETE_RETENTION_DAYS). + Type: Number + Default: 30 + MinValue: 1 + + SyncbotFederationEnabled: + Description: Set to "true" to enable external connections / federation (SYNCBOT_FEDERATION_ENABLED). + Type: String + Default: "false" + AllowedValues: + - "true" + - "false" + + SyncbotInstanceId: + Description: > + Optional stable UUID for this instance (SYNCBOT_INSTANCE_ID). Leave empty to auto-generate at runtime. + Type: String + Default: "" + + SyncbotPublicUrl: + Description: > + Public HTTPS base URL without path (SYNCBOT_PUBLIC_URL). Required when + federation is enabled (SyncbotFederationEnabled=true); use the API Gateway + stage URL from stack outputs or a custom domain. Leave empty otherwise. + Type: String + Default: "" + + EnableDbReset: + Description: > + Slack Team ID to scope the Reset Database button (ENABLE_DB_RESET). Leave empty to disable. + Type: String + Default: "" + + DatabaseTlsEnabled: + Description: > + Optional DATABASE_TLS_ENABLED. Empty = use app default (TLS on outside local dev). + Set "true" or "false" to override. 
+ Type: String + Default: "" + AllowedValues: + - "" + - "true" + - "false" + + DatabaseSslCaPath: + Description: > + Optional CA bundle path when DB TLS is on (DATABASE_SSL_CA_PATH). Empty = app default. + Type: String + Default: "" + # ================================================================ # Conditions # ================================================================ @@ -232,6 +283,10 @@ Conditions: HasExistingTokenEncryptionKeySecretArn: !Not [!Equals [!Ref ExistingTokenEncryptionKeySecretArn, ""]] HasAppDbPasswordOverride: !Not [!Equals [!Ref AppDbPasswordOverride, ""]] HasNoAppDbPasswordOverride: !Not [!Condition HasAppDbPasswordOverride] + HasSyncbotPublicUrlOverride: !Not [!Equals [!Ref SyncbotPublicUrl, ""]] + HasEnableDbReset: !Not [!Equals [!Ref EnableDbReset, ""]] + HasDatabaseTlsExplicit: !Not [!Equals [!Ref DatabaseTlsEnabled, ""]] + HasDatabaseSslCaPath: !Not [!Equals [!Ref DatabaseSslCaPath, ""]] CreateTokenEncryptionKeySecret: !And - !Not [!Condition HasTokenEncryptionKeyOverride] - !Not [!Condition HasExistingTokenEncryptionKeySecretArn] @@ -654,6 +709,22 @@ Resources: - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" REQUIRE_ADMIN: !Ref RequireAdmin LOG_LEVEL: !Ref LogLevel + SOFT_DELETE_RETENTION_DAYS: !Sub "${SoftDeleteRetentionDays}" + SYNCBOT_FEDERATION_ENABLED: !Ref SyncbotFederationEnabled + SYNCBOT_INSTANCE_ID: !Ref SyncbotInstanceId + SYNCBOT_PUBLIC_URL: !Ref SyncbotPublicUrl + ENABLE_DB_RESET: !If + - HasEnableDbReset + - !Ref EnableDbReset + - "" + DATABASE_TLS_ENABLED: !If + - HasDatabaseTlsExplicit + - !Ref DatabaseTlsEnabled + - !Ref AWS::NoValue + DATABASE_SSL_CA_PATH: !If + - HasDatabaseSslCaPath + - !Ref DatabaseSslCaPath + - !Ref AWS::NoValue # Slack Bolt (aws_lambda adapter) runs lazy listeners by invoking this function again # via lambda:InvokeFunction. The execution role must allow self-invoke. 
@@ -755,6 +826,13 @@ Resources: # ================================================================ Outputs: + SyncBotPublicBaseUrl: + Description: Public HTTPS base URL (SYNCBOT_PUBLIC_URL) for Slack and federation + Value: !If + - HasSyncbotPublicUrlOverride + - !Ref SyncbotPublicUrl + - !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod" + SyncBotApiUrl: Description: API Gateway endpoint URL Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/events/" diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 0ec9316..105ec77 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -45,6 +45,28 @@ locals { ) db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" + + # Non-secret Cloud Run env (see docs/INFRA_CONTRACT.md) + syncbot_public_url_effective = trimspace(var.syncbot_public_url_override) != "" ? trimspace(var.syncbot_public_url_override) : "" + runtime_plain_env = merge( + { + DATABASE_HOST = local.db_host + DATABASE_USER = local.db_user + DATABASE_SCHEMA = local.db_schema + DATABASE_BACKEND = var.database_backend + DATABASE_PORT = var.database_port + SLACK_USER_SCOPES = var.slack_user_scopes + LOG_LEVEL = var.log_level + REQUIRE_ADMIN = var.require_admin + SOFT_DELETE_RETENTION_DAYS = tostring(var.soft_delete_retention_days) + SYNCBOT_FEDERATION_ENABLED = var.syncbot_federation_enabled ? "true" : "false" + }, + var.syncbot_instance_id != "" ? { SYNCBOT_INSTANCE_ID = var.syncbot_instance_id } : {}, + local.syncbot_public_url_effective != "" ? { SYNCBOT_PUBLIC_URL = trimsuffix(local.syncbot_public_url_effective, "/") } : {}, + trimspace(var.enable_db_reset) != "" ? { ENABLE_DB_RESET = var.enable_db_reset } : {}, + var.database_tls_enabled != "" ? { DATABASE_TLS_ENABLED = var.database_tls_enabled } : {}, + trimspace(var.database_ssl_ca_path) != "" ?
{ DATABASE_SSL_CA_PATH = var.database_ssl_ca_path } : {}, + ) } # --------------------------------------------------------------------------- @@ -257,26 +280,12 @@ resource "google_cloud_run_v2_service" "syncbot" { } } - env { - name = "DATABASE_HOST" - value = local.db_host - } - env { - name = "DATABASE_USER" - value = local.db_user - } - env { - name = "DATABASE_SCHEMA" - value = local.db_schema - } - # Runtime user OAuth scopes — must match slack-manifest.json and USER_SCOPES in slack_manifest_scopes.py - env { - name = "SLACK_USER_SCOPES" - value = var.slack_user_scopes - } - env { - name = "LOG_LEVEL" - value = var.log_level + dynamic "env" { + for_each = local.runtime_plain_env + content { + name = env.key + value = env.value + } } dynamic "env" { diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index 04b37ce..a1156d1 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -6,11 +6,9 @@ # Phases (main path): # 1) Prerequisites (terraform, gcloud, python3, curl) # 2) Project, region, stage; detect existing Cloud Run service -# 3) Database source: USE_EXISTING true = external DB only (skip Cloud SQL); false = Terraform-managed DB path -# 4) Container image var for Cloud Run -# 5) terraform init / plan / apply -# 6) Stage Slack manifest, optional Slack API configure -# 7) Deploy receipt, print-bootstrap-outputs, optional GitHub Actions vars +# 3) Deploy Tasks: multi-select menu (build/deploy, CI/CD, Slack API, backup secrets) +# 4) Configuration (if build/deploy): database, image, log level, terraform init/plan/apply +# 5) Post-tasks: Slack manifest/API, deploy receipt, print-bootstrap-outputs, GitHub Actions, DR secrets set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" @@ -21,6 +19,7 @@ SLACK_MANIFEST_GENERATED_PATH="" # shellcheck source=/dev/null source "$REPO_ROOT/deploy.sh" +echo "=== Prerequisites ===" prereqs_require_cmd terraform prereqs_hint_terraform prereqs_require_cmd gcloud 
prereqs_hint_gcloud prereqs_require_cmd python3 prereqs_hint_python3 @@ -531,6 +530,8 @@ if [[ -z "$PROJECT_ID" ]]; then fi REGION="$(prompt_line "GCP region" "${GCP_REGION:-us-central1}")" +echo +echo "=== Authentication ===" ensure_gcloud_authenticated ensure_gcloud_adc_authenticated gcloud config set project "$PROJECT_ID" >/dev/null 2>&1 || true @@ -553,6 +554,21 @@ if [[ -n "$EXISTING_SERVICE_URL" ]]; then fi echo +prompt_deploy_tasks_gcp + +if [[ "$TASK_BUILD_DEPLOY" != "true" ]]; then + if [[ "$TASK_CICD" == "true" || "$TASK_SLACK_API" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + cd "$GCP_DIR" + if ! terraform output -raw service_url &>/dev/null; then + echo "Error: No Terraform outputs found in $GCP_DIR. Select task 1 (Build/Deploy) first." >&2 + exit 1 + fi + fi +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then +echo +echo "=== Configuration ===" echo "=== Database Source ===" # USE_EXISTING=true: point Terraform at an external DB only (use_existing_database); skip creating Cloud SQL. # USE_EXISTING_DEFAULT: y/n default for the prompt when redeploying without a managed instance for this stage. @@ -601,6 +617,8 @@ DETECTED_CLOUD_IMAGE="" if [[ -n "$EXISTING_SERVICE_URL" ]]; then DETECTED_CLOUD_IMAGE="$(cloud_run_image_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME")" fi +echo +echo "=== Container Image ===" CLOUD_IMAGE="$(prompt_line "cloud_run_image (required)" "$DETECTED_CLOUD_IMAGE")" if [[ -z "$CLOUD_IMAGE" ]]; then echo "Error: cloud_run_image is required. Build and push the SyncBot image first, then rerun." >&2 @@ -623,17 +641,79 @@ echo echo "=== Log Level ===" LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" +# Preserve optional runtime env on redeploy (Terraform defaults otherwise). 
+REQUIRE_ADMIN_DEFAULT="true" +SOFT_DELETE_DEFAULT="30" +SYNCBOT_PUBLIC_DEFAULT="" +SYNCBOT_FEDERATION_DEFAULT="false" +INSTANCE_ID_VAR="" +ENABLE_DB_RESET_VAR="" +DB_TLS_VAR="" +DB_SSL_CA_VAR="" +DB_BACKEND="mysql" +DB_PORT="3306" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_RA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "REQUIRE_ADMIN")" + [[ -n "$DETECTED_RA" ]] && REQUIRE_ADMIN_DEFAULT="$DETECTED_RA" + DETECTED_SD="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SOFT_DELETE_RETENTION_DAYS")" + if [[ "$DETECTED_SD" =~ ^[0-9]+$ ]]; then + SOFT_DELETE_DEFAULT="$DETECTED_SD" + fi + SYNCBOT_PUBLIC_DEFAULT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_PUBLIC_URL")" + DETECTED_FED="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_FEDERATION_ENABLED")" + if [[ "$DETECTED_FED" == "true" ]]; then + SYNCBOT_FEDERATION_DEFAULT="true" + elif [[ "$DETECTED_FED" == "false" ]]; then + SYNCBOT_FEDERATION_DEFAULT="false" + fi + DETECTED_INSTANCE_ID="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_INSTANCE_ID")" + INSTANCE_ID_VAR="${DETECTED_INSTANCE_ID:-}" + DETECTED_ER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "ENABLE_DB_RESET")" + ENABLE_DB_RESET_VAR="${DETECTED_ER:-}" + DETECTED_DB_TLS="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_TLS_ENABLED")" + DB_TLS_VAR="${DETECTED_DB_TLS:-}" + DETECTED_DB_SSL_CA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_SSL_CA_PATH")" + DB_SSL_CA_VAR="${DETECTED_DB_SSL_CA:-}" + DETECTED_DB_BACKEND="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_BACKEND")" + [[ -n "$DETECTED_DB_BACKEND" ]] && DB_BACKEND="$DETECTED_DB_BACKEND" + DETECTED_DB_PORT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PORT")" + [[ -n "$DETECTED_DB_PORT" ]] && DB_PORT="$DETECTED_DB_PORT" +fi + +echo +echo "=== App Settings ===" 
+REQUIRE_ADMIN_DEFAULT="$(prompt_require_admin "$REQUIRE_ADMIN_DEFAULT")" +SOFT_DELETE_DEFAULT="$(prompt_soft_delete_retention_days "$SOFT_DELETE_DEFAULT")" +ENABLE_DB_RESET_VAR="$(prompt_enable_db_reset "$ENABLE_DB_RESET_VAR")" +SYNCBOT_FEDERATION_DEFAULT="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_DEFAULT")" +if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then + INSTANCE_ID_VAR="$(prompt_instance_id "$INSTANCE_ID_VAR")" + SYNCBOT_PUBLIC_DEFAULT="$(prompt_public_url "$SYNCBOT_PUBLIC_DEFAULT")" +fi + echo echo "=== Terraform Init ===" echo "Running: terraform init" cd "$GCP_DIR" terraform init +# TF_VAR_* avoids shell parsing issues when the URL contains & or other metacharacters. +export TF_VAR_syncbot_public_url_override="$SYNCBOT_PUBLIC_DEFAULT" + VARS=( "-var=project_id=$PROJECT_ID" "-var=region=$REGION" "-var=stage=$STAGE" "-var=log_level=$LOG_LEVEL" + "-var=require_admin=$REQUIRE_ADMIN_DEFAULT" + "-var=soft_delete_retention_days=$SOFT_DELETE_DEFAULT" + "-var=syncbot_federation_enabled=$SYNCBOT_FEDERATION_DEFAULT" + "-var=syncbot_instance_id=${INSTANCE_ID_VAR:-}" + "-var=enable_db_reset=${ENABLE_DB_RESET_VAR:-}" + "-var=database_tls_enabled=${DB_TLS_VAR:-}" + "-var=database_ssl_ca_path=${DB_SSL_CA_VAR:-}" + "-var=database_backend=${DB_BACKEND:-mysql}" + "-var=database_port=${DB_PORT:-3306}" ) if [[ "$USE_EXISTING" == "true" ]]; then @@ -649,95 +729,123 @@ fi VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") echo -echo "Log level: $LOG_LEVEL" +echo "Require admin: $REQUIRE_ADMIN_DEFAULT" +echo "Soft-delete days: $SOFT_DELETE_DEFAULT" +echo "Log level: $LOG_LEVEL" +if [[ -n "$ENABLE_DB_RESET_VAR" ]]; then + echo "DB reset (team): $ENABLE_DB_RESET_VAR" +else + echo "DB reset (team): (disabled)" +fi +if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then + echo "Federation: enabled" + [[ -n "$INSTANCE_ID_VAR" ]] && echo "Instance ID: $INSTANCE_ID_VAR" + [[ -n "$SYNCBOT_PUBLIC_DEFAULT" ]] && echo "Public URL: $SYNCBOT_PUBLIC_DEFAULT" +fi echo echo "=== Terraform 
Plan ===" -if ! prompt_yn "Run terraform plan?" "y"; then - echo "Skipped. Run manually from infra/gcp:" - echo " terraform plan ${VARS[*]}" - exit 0 -fi - terraform plan "${VARS[@]}" echo echo "=== Terraform Apply ===" -if ! prompt_yn "Apply changes (terraform apply)?" "y"; then - echo "Aborted." - exit 0 -fi - terraform apply -auto-approve "${VARS[@]}" echo echo "=== Apply Complete ===" SERVICE_URL="$(terraform output -raw service_url 2>/dev/null || true)" + +else + echo + echo "Skipping Build/Deploy (task 1 not selected)." + cd "$GCP_DIR" + SERVICE_URL="$(terraform output -raw service_url 2>/dev/null || true)" +fi + SYNCBOT_API_URL="" SYNCBOT_INSTALL_URL="" if [[ -n "$SERVICE_URL" ]]; then SYNCBOT_API_URL="${SERVICE_URL%/}/slack/events" SYNCBOT_INSTALL_URL="${SERVICE_URL%/}/slack/install" fi -generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" -if [[ -n "$SLACK_MANIFEST_GENERATED_PATH" ]]; then - if prompt_yn "Configure Slack app via Slack API now (create or update from generated manifest)?" "n"; then - slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" - fi -fi -write_deploy_receipt \ - "gcp" \ - "$STAGE" \ - "$PROJECT_ID" \ - "$REGION" \ - "$SERVICE_URL" \ - "$SYNCBOT_INSTALL_URL" \ - "$SLACK_MANIFEST_GENERATED_PATH" - -echo "Next:" -echo " 1) Set Secret Manager values for Slack (see infra/gcp/README.md)." -echo " 2) Build and push container image; update cloud_run_image and re-apply when image changes." -echo " 3) Run: ./infra/gcp/scripts/print-bootstrap-outputs.sh" -bash "$SCRIPT_DIR/print-bootstrap-outputs.sh" || true - -if prompt_yn "Set up GitHub Actions configuration now?" "n"; then - configure_github_actions_gcp "$PROJECT_ID" "$REGION" "$GCP_DIR" "$STAGE" +echo +echo "=== Post-Deploy ===" +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo "Deploy complete." 
fi -TOKEN_SECRET_NAME="$(terraform output -raw token_encryption_secret_name 2>/dev/null || true)" -TOKEN_SECRET_NAME="${TOKEN_SECRET_NAME##*/secrets/}" -DB_SECRET_NAME="$(cloud_run_secret_name "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PASSWORD")" -TOKEN_SECRET_VALUE="" -DB_SECRET_VALUE="" -if [[ -n "$TOKEN_SECRET_NAME" ]]; then - TOKEN_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$TOKEN_SECRET_NAME")" +if [[ "$TASK_SLACK_API" == "true" || "$TASK_BUILD_DEPLOY" == "true" ]]; then + generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" fi -if [[ -n "$DB_SECRET_NAME" ]]; then - DB_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$DB_SECRET_NAME")" + +if [[ "$TASK_SLACK_API" == "true" ]] && [[ -n "${SLACK_MANIFEST_GENERATED_PATH:-}" ]]; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" fi -echo -echo "=== Backup Secrets (Disaster Recovery) ===" -# IMPORTANT: This deploy script must always print plaintext backup secrets at the end. -# Do not remove/redact this section; operators rely on it for DR copy-out immediately after deploy. -echo "Copy these values now and store them in your secure disaster-recovery vault." -if [[ -n "$TOKEN_SECRET_NAME" ]]; then - echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_NAME" -else - echo "- TOKEN_ENCRYPTION_KEY source: " +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo + echo "=== Deploy Receipt ===" + write_deploy_receipt \ + "gcp" \ + "$STAGE" \ + "$PROJECT_ID" \ + "$REGION" \ + "$SERVICE_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" + + echo "Next:" + echo " 1) Set Secret Manager values for Slack (see infra/gcp/README.md)." + echo " 2) Build and push container image; update cloud_run_image and re-apply when image changes." 
+ echo " 3) Run: ./infra/gcp/scripts/print-bootstrap-outputs.sh" + bash "$SCRIPT_DIR/print-bootstrap-outputs.sh" || true fi -if [[ -n "$TOKEN_SECRET_VALUE" ]]; then - echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" -else - echo " TOKEN_ENCRYPTION_KEY: " + +if [[ "$TASK_CICD" == "true" ]]; then + configure_github_actions_gcp "$PROJECT_ID" "$REGION" "$GCP_DIR" "$STAGE" fi -if [[ -n "$DB_SECRET_NAME" ]]; then - echo "- DATABASE_PASSWORD source: $DB_SECRET_NAME" -else - echo "- DATABASE_PASSWORD source: " + +TOKEN_SECRET_NAME="" +DB_SECRET_NAME="" +TOKEN_SECRET_VALUE="" +DB_SECRET_VALUE="" +if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + cd "$GCP_DIR" + TOKEN_SECRET_NAME="$(terraform output -raw token_encryption_secret_name 2>/dev/null || true)" + TOKEN_SECRET_NAME="${TOKEN_SECRET_NAME##*/secrets/}" + DB_SECRET_NAME="$(cloud_run_secret_name "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PASSWORD")" + if [[ -n "$TOKEN_SECRET_NAME" ]]; then + TOKEN_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$TOKEN_SECRET_NAME")" + fi + if [[ -n "$DB_SECRET_NAME" ]]; then + DB_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$DB_SECRET_NAME")" + fi fi -if [[ -n "$DB_SECRET_VALUE" ]]; then - echo " DATABASE_PASSWORD: $DB_SECRET_VALUE" -else - echo " DATABASE_PASSWORD: " + +if [[ "$TASK_BACKUP_SECRETS" == "true" ]]; then + echo + echo "=== Backup Secrets (Disaster Recovery) ===" + # IMPORTANT: When Backup Secrets is selected, print plaintext backup secrets here. + # Do not remove/redact this section; operators rely on it for DR copy-out. + echo "Copy these values now and store them in your secure disaster-recovery vault." 
+ if [[ -n "$TOKEN_SECRET_NAME" ]]; then + echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_NAME" + else + echo "- TOKEN_ENCRYPTION_KEY source: " + fi + if [[ -n "$TOKEN_SECRET_VALUE" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" + else + echo " TOKEN_ENCRYPTION_KEY: " + fi + if [[ -n "$DB_SECRET_NAME" ]]; then + echo "- DATABASE_PASSWORD source: $DB_SECRET_NAME" + else + echo "- DATABASE_PASSWORD source: " + fi + if [[ -n "$DB_SECRET_VALUE" ]]; then + echo " DATABASE_PASSWORD: $DB_SECRET_VALUE" + else + echo " DATABASE_PASSWORD: " + fi fi diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 755a411..6ce01c1 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -166,3 +166,87 @@ variable "secret_db_password" { default = "syncbot-db-password" description = "Secret Manager secret ID for DATABASE_PASSWORD (used when use_existing_database = true or with Cloud SQL)" } + +# --------------------------------------------------------------------------- +# Runtime plain env (Cloud Run) — parity with infra/aws/template.yaml +# --------------------------------------------------------------------------- + +variable "database_backend" { + type = string + default = "mysql" + description = "DATABASE_BACKEND; Cloud SQL in this stack is MySQL 8." + + validation { + condition = contains(["mysql", "postgresql"], var.database_backend) + error_message = "database_backend must be mysql or postgresql." + } +} + +variable "database_port" { + type = string + default = "3306" + description = "DATABASE_PORT for MySQL (default 3306)." +} + +variable "require_admin" { + type = string + default = "true" + description = "REQUIRE_ADMIN: true or false." + + validation { + condition = contains(["true", "false"], var.require_admin) + error_message = "require_admin must be true or false." + } +} + +variable "soft_delete_retention_days" { + type = number + default = 30 + description = "SOFT_DELETE_RETENTION_DAYS (minimum 1)." 
+ + validation { + condition = var.soft_delete_retention_days >= 1 + error_message = "soft_delete_retention_days must be at least 1." + } +} + +variable "syncbot_federation_enabled" { + type = bool + default = false + description = "SYNCBOT_FEDERATION_ENABLED (maps to string true/false in env)." +} + +variable "syncbot_instance_id" { + type = string + default = "" + description = "SYNCBOT_INSTANCE_ID; leave empty for app auto-generation." +} + +variable "syncbot_public_url_override" { + type = string + default = "" + description = "SYNCBOT_PUBLIC_URL (HTTPS base, no path). Set after first deploy if using federation; empty omits the env var." +} + +variable "enable_db_reset" { + type = string + default = "" + description = "ENABLE_DB_RESET: Slack Team ID to scope Reset Database; empty omits the env var." +} + +variable "database_tls_enabled" { + type = string + default = "" + description = "DATABASE_TLS_ENABLED; empty = app default (TLS on outside local dev)." + + validation { + condition = contains(["", "true", "false"], var.database_tls_enabled) + error_message = "database_tls_enabled must be empty, true, or false." + } +} + +variable "database_ssl_ca_path" { + type = string + default = "" + description = "DATABASE_SSL_CA_PATH when TLS is on; empty omits (app default CA path)." +} From baf0e38b90933910d2ff70a7808b3a886e1c748c Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 11:24:29 -0500 Subject: [PATCH 26/45] Fix for modal issues in Lambda. 
--- docs/DEPLOYMENT.md | 11 +++ syncbot/app.py | 40 +++++---- syncbot/handlers/channel_sync.py | 10 +++ syncbot/slack/deferred_ack_views.py | 18 ++++ tests/conftest.py | 10 ++- tests/test_app_main_response.py | 123 ++++++++++++++++++++++++++++ tests/test_app_registration.py | 41 ++++++++++ tests/test_channel_sync_handlers.py | 21 +++++ tests/test_routing_deferred_ack.py | 13 +++ 9 files changed, 269 insertions(+), 18 deletions(-) create mode 100644 syncbot/slack/deferred_ack_views.py create mode 100644 tests/test_app_main_response.py create mode 100644 tests/test_app_registration.py create mode 100644 tests/test_routing_deferred_ack.py diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 5014754..01e67a0 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -272,6 +272,17 @@ Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upg --- +## Post-deploy: Slack deferred modal flows (manual smoke test) + +After deploying a build that changes Slack listener wiring, verify **in the deployed workspace** (not only local dev) that modals using custom interaction responses still work. These flows rely on `view_submission` acks (`response_action`: `update`, `errors`, or `push`) being returned in the **first** Lambda response: + +1. **Sync Channel (publish)** — Open **Sync Channel**, choose sync mode, press **Next**; confirm step 2 (channel picker) appears. Submit with an invalid state to confirm field errors if applicable. +2. **Backup / Restore** — Open Backup/Restore; try restore validation (e.g. missing file) and, if possible, the integrity-warning confirmation path (`push`). +3. **Data migration** (if federation enabled) — Same style of checks for import validation and confirmation. +4. **Optional** — Trigger a Home tab action that opens a modal via **`views_open`** (uses `trigger_id`) after a cold start to spot-check latency. 
+ +--- + ## Sharing infrastructure across apps (AWS) Reuse one RDS with **different `DatabaseSchema`** per app/environment; set **ExistingDatabaseHost** and distinct schemas. API Gateway and Lambda remain per stack. diff --git a/syncbot/app.py b/syncbot/app.py index 81da070..1620877 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -41,19 +41,9 @@ set_correlation_id, ) from routing import MAIN_MAPPER -from slack.actions import ( - CONFIG_BACKUP_RESTORE_SUBMIT, - CONFIG_DATA_MIGRATION_SUBMIT, - CONFIG_PUBLISH_CHANNEL_SUBMIT, - CONFIG_PUBLISH_MODE_SUBMIT, -) +from slack.deferred_ack_views import DEFERRED_ACK_VIEW_CALLBACK_IDS -_DEFERRED_ACK_VIEWS = frozenset({ - CONFIG_PUBLISH_MODE_SUBMIT, - CONFIG_PUBLISH_CHANNEL_SUBMIT, - CONFIG_BACKUP_RESTORE_SUBMIT, - CONFIG_DATA_MIGRATION_SUBMIT, -}) +_DEFERRED_ACK_VIEWS = DEFERRED_ACK_VIEW_CALLBACK_IDS """view_submission callback_ids whose handlers control their own ack response.""" _SENSITIVE_KEYS = frozenset({ @@ -126,9 +116,9 @@ def _lambda_federation_handler(event: dict) -> dict: def main_response(body: dict, logger, client, ack, context: dict) -> None: """Central dispatcher for every Slack request. - Acknowledges the request immediately (required by Slack's 3-second - timeout), then resolves the ``(request_type, request_id)`` pair to - a handler function via :data:`MAIN_MAPPER` and invokes it. + For most requests, acknowledges immediately (required by Slack's 3-second + timeout). Certain ``view_submission`` handlers defer the ack so they can + return ``response_action`` (see :data:`_DEFERRED_ACK_VIEWS`). A unique correlation ID is assigned to every incoming request and attached to all log entries emitted while processing it. 
@@ -170,6 +160,13 @@ def _tracked_ack(*args, **kwargs): if isinstance(result, dict): ack(**result) else: + _logger.warning( + "deferred_view_ack_fallback", + extra={ + "request_id": request_id, + "view_callback_id": safe_get(body, "view", "callback_id"), + }, + ) ack() emit_metric( "request_handled", @@ -179,6 +176,14 @@ def _tracked_ack(*args, **kwargs): ) except Exception: if defer_ack and not ack_called: + _logger.warning( + "deferred_view_ack_fallback", + extra={ + "request_id": request_id, + "view_callback_id": safe_get(body, "view", "callback_id"), + "reason": "exception", + }, + ) ack() emit_metric( "request_error", @@ -211,7 +216,10 @@ def _tracked_ack(*args, **kwargs): MATCH_ALL_PATTERN = re.compile(".*") app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) -app.view(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) +# View submissions must run synchronously so deferred ack (response_action: +# update / errors / push) is returned in the same HTTP response to Slack. +# Lazy listeners run after the default ack is sent, which breaks multi-step modals. 
+app.view(MATCH_ALL_PATTERN)(main_response) if __name__ == "__main__": diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 536f41c..8d47d89 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -12,6 +12,7 @@ from builders._common import _format_channel_ref, _get_group_members from db import DbManager, schemas from handlers._common import ( + _extract_team_id, _get_authorized_workspace, _get_selected_conversation_or_option, _get_selected_option_value, @@ -196,6 +197,15 @@ def handle_publish_mode_submit( metadata = _parse_private_metadata(body) group_id = metadata.get("group_id") if not group_id: + raw_pm = helpers.safe_get(body, "view", "private_metadata") or "" + _logger.warning( + "publish_mode_submit: missing group_id in metadata", + extra={ + "team_id": _extract_team_id(body), + "workspace_id": metadata.get("workspace_id"), + "private_metadata_len": len(raw_pm) if isinstance(raw_pm, str) else None, + }, + ) return sync_mode = _get_selected_option_value(body, actions.CONFIG_PUBLISH_SYNC_MODE) or "group" diff --git a/syncbot/slack/deferred_ack_views.py b/syncbot/slack/deferred_ack_views.py new file mode 100644 index 0000000..3ffb144 --- /dev/null +++ b/syncbot/slack/deferred_ack_views.py @@ -0,0 +1,18 @@ +"""View submission callback IDs whose handlers control the Slack interaction HTTP ack. + +Kept separate from :mod:`app` so tests can import it without initializing the database. 
+""" + +from slack.actions import ( + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_PUBLISH_MODE_SUBMIT, +) + +DEFERRED_ACK_VIEW_CALLBACK_IDS = frozenset({ + CONFIG_PUBLISH_MODE_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, +}) diff --git a/tests/conftest.py b/tests/conftest.py index 713fc4c..ac90161 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,5 +2,11 @@ import os -# Unit tests use MySQL-style env vars without a real server; keep mysql backend. -os.environ.setdefault("DATABASE_BACKEND", "mysql") +# In-memory SQLite so importing `app` (which calls initialize_database) works without MySQL. +os.environ.setdefault("DATABASE_BACKEND", "sqlite") +os.environ.setdefault("DATABASE_URL", "sqlite:///:memory:") +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") diff --git a/tests/test_app_main_response.py b/tests/test_app_main_response.py new file mode 100644 index 0000000..5662886 --- /dev/null +++ b/tests/test_app_main_response.py @@ -0,0 +1,123 @@ +"""Unit tests for syncbot.app.main_response ack semantics (deferred vs immediate).""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +import app as app_module # noqa: E402 +from slack import actions # noqa: E402 + + +def _body_view_submit(callback_id: str) -> dict: + return { + "type": "view_submission", + "team_id": "T001", + "view": {"callback_id": callback_id}, + } + + +class TestMainResponseDeferredAck: + """Deferred ack 
views must receive ack(**result) or context['ack'](...) in the same dispatch.""" + + def test_returns_dict_uses_ack_kwargs(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "bad"}, + } + + custom = {actions.CONFIG_BACKUP_RESTORE_SUBMIT: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + ): + app_module.main_response( + _body_view_submit(actions.CONFIG_BACKUP_RESTORE_SUBMIT), + MagicMock(), + MagicMock(), + ack, + context, + ) + + ack.assert_called_once() + assert ack.call_args.kwargs["response_action"] == "errors" + assert "errors" in ack.call_args.kwargs + + def test_context_ack_called_skips_fallback(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + ctx["ack"](response_action="update", view={"type": "modal", "callback_id": "x"}) + + custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + ): + app_module.main_response( + _body_view_submit(actions.CONFIG_PUBLISH_MODE_SUBMIT), + MagicMock(), + MagicMock(), + ack, + context, + ) + + ack.assert_called_once_with(response_action="update", view={"type": "modal", "callback_id": "x"}) + + def test_no_ack_no_dict_logs_warning_and_calls_empty_ack(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + return None + + custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + patch.object(app_module, "_logger") as mock_log, + ): + app_module.main_response( + _body_view_submit(actions.CONFIG_PUBLISH_MODE_SUBMIT), + MagicMock(), + MagicMock(), + ack, + context, + ) + + warn_text = " ".join( + str(c.args[0]) if c.args 
else "" for c in mock_log.warning.call_args_list + ) + assert "deferred_view_ack_fallback" in warn_text + ack.assert_called_once_with() + + +class TestMainResponseImmediateAck: + """Non-deferred view_submission: ack() runs before handler.""" + + def test_non_deferred_ack_before_handler(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + assert ack.call_count == 1 + return None + + cid = actions.CONFIG_NEW_SYNC_SUBMIT + custom = {cid: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + ): + app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) + + ack.assert_called_once_with() diff --git a/tests/test_app_registration.py b/tests/test_app_registration.py new file mode 100644 index 0000000..eecccc7 --- /dev/null +++ b/tests/test_app_registration.py @@ -0,0 +1,41 @@ +"""Guardrails: Slack Bolt listener wiring for deferred view submissions (sync app.view).""" + +from pathlib import Path + + +def test_app_py_registers_view_listener_synchronously(): + """app.view must call main_response directly so view_submission ack reaches Slack (not lazy).""" + root = Path(__file__).resolve().parents[1] + app_py = root / "syncbot" / "app.py" + text = app_py.read_text(encoding="utf-8") + assert "app.view(MATCH_ALL_PATTERN)(main_response)" in text + assert "app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text + assert "app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text + + +def test_bolt_view_listener_has_no_lazy_functions(): + """Bolt CustomListener for app.view should use main_response as ack only (no lazy split).""" + import app as app_module + + bolt_app = app_module.app + sync_main = [ + li + for li in bolt_app._listeners + if getattr(li.ack_function, "__name__", None) == "main_response" and len(li.lazy_functions) == 0 + ] + assert sync_main, "expected at least one listener with ack_function=main_response and empty 
lazy_functions" + + +def test_bolt_event_or_action_uses_lazy_main_response_in_prod_mode(): + """When not LOCAL_DEVELOPMENT, event/action listeners should defer work to lazy main_response.""" + import app as app_module + + if app_module.LOCAL_DEVELOPMENT: + return + bolt_app = app_module.app + lazy = [ + li + for li in bolt_app._listeners + if li.lazy_functions and any(getattr(f, "__name__", None) == "main_response" for f in li.lazy_functions) + ] + assert lazy, "expected lazy listeners with main_response when LOCAL_DEVELOPMENT is false" diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py index 5021b40..323b9b5 100644 --- a/tests/test_channel_sync_handlers.py +++ b/tests/test_channel_sync_handlers.py @@ -12,10 +12,31 @@ from handlers.channel_sync import ( # noqa: E402 handle_publish_channel_submit, + handle_publish_mode_submit, handle_subscribe_channel_submit, ) +class TestPublishModeSubmit: + def test_missing_group_id_logs_warning(self): + client = MagicMock() + logger = MagicMock() + context = {"ack": MagicMock()} + workspace = SimpleNamespace(id=10) + body = {"view": {"team_id": "T1", "private_metadata": "{}"}} + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync._logger.warning") as warn_log, + ): + handle_publish_mode_submit(body, client, logger, context) + + assert warn_log.call_args is not None + assert "publish_mode_submit: missing group_id in metadata" in warn_log.call_args.args[0] + context["ack"].assert_not_called() + + class TestPublishChannelSubmit: def test_missing_group_id_exits_early(self): client = MagicMock() diff --git a/tests/test_routing_deferred_ack.py b/tests/test_routing_deferred_ack.py new file mode 100644 index 0000000..1ac6a40 --- /dev/null +++ b/tests/test_routing_deferred_ack.py @@ -0,0 +1,13 @@ +"""Invariant: deferred-ack view callback IDs stay 
registered in VIEW_MAPPER.""" + +from routing import VIEW_MAPPER +from slack.deferred_ack_views import DEFERRED_ACK_VIEW_CALLBACK_IDS + + +def test_deferred_ack_views_are_routed(): + for callback_id in DEFERRED_ACK_VIEW_CALLBACK_IDS: + assert callback_id in VIEW_MAPPER, f"missing VIEW_MAPPER entry for deferred view {callback_id!r}" + + +def test_deferred_ack_set_is_nonempty(): + assert len(DEFERRED_ACK_VIEW_CALLBACK_IDS) >= 1 From 00c7d2d3747ff6ff7935a458426698cb2756ad0b Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 13:11:02 -0500 Subject: [PATCH 27/45] Redesigned Slack ack flow to work better in Lambda. --- syncbot/app.py | 129 +++++---- syncbot/constants.py | 4 +- syncbot/db/alembic/env.py | 7 +- syncbot/db/alembic/versions/001_baseline.py | 10 +- syncbot/handlers/__init__.py | 22 +- syncbot/handlers/channel_sync.py | 94 ++++--- syncbot/handlers/export_import.py | 288 +++++++++++++++----- syncbot/routing.py | 19 +- tests/test_app_main_response.py | 75 +++-- tests/test_app_registration.py | 23 +- tests/test_channel_sync_handlers.py | 48 ++-- tests/test_db.py | 5 +- tests/test_db_setup.py | 49 +++- tests/test_export_import_handlers.py | 8 +- tests/test_routing_deferred_ack.py | 20 +- 15 files changed, 511 insertions(+), 290 deletions(-) diff --git a/syncbot/app.py b/syncbot/app.py index 1620877..ecd80f3 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -4,8 +4,10 @@ local development (``python app.py`` starts a Bolt dev server on port 3000). All incoming Slack events, actions, view submissions, and slash commands are -funnelled through :func:`main_response`, which looks up the appropriate -handler in :data:`~utils.routing.MAIN_MAPPER` and dispatches the request. +dispatched through :func:`main_response`. In production (non-local), view +submissions first run :func:`view_ack` for the HTTP response, then :func:`main_response` +for the work phase (lazy). 
Handlers are looked up in :data:`routing.MAIN_MAPPER` +and :data:`routing.VIEW_ACK_MAPPER`. Federation API endpoints (``/api/federation/*``) handle cross-instance communication and are dispatched separately from Slack events. @@ -40,11 +42,7 @@ get_request_duration_ms, set_correlation_id, ) -from routing import MAIN_MAPPER -from slack.deferred_ack_views import DEFERRED_ACK_VIEW_CALLBACK_IDS - -_DEFERRED_ACK_VIEWS = DEFERRED_ACK_VIEW_CALLBACK_IDS -"""view_submission callback_ids whose handlers control their own ack response.""" +from routing import MAIN_MAPPER, VIEW_ACK_MAPPER, VIEW_MAPPER _SENSITIVE_KEYS = frozenset({ "token", "bot_token", "access_token", "shared_secret", @@ -113,12 +111,43 @@ def _lambda_federation_handler(event: dict) -> dict: _logger = logging.getLogger(__name__) +def view_ack(body: dict, logger, client, ack, context: dict) -> None: + """Production ack handler for ``view_submission``: fast response to Slack (3s budget). + + Deferred-ack views use :data:`~routing.VIEW_ACK_MAPPER`; all others get an empty ``ack()``. + """ + set_correlation_id() + request_type, request_id = get_request_type(body) + _logger.info( + "request_received", + extra={ + "request_type": request_type, + "request_id": request_id, + "team_id": safe_get(body, "team_id"), + "phase": "view_ack", + }, + ) + _logger.debug("request_body", extra={"body": json.dumps(_redact_sensitive(body))}) + + ack_handler = VIEW_ACK_MAPPER.get(request_id) + if ack_handler: + result = ack_handler(body, client, context) + if isinstance(result, dict): + ack(**result) + else: + ack() + else: + ack() + + def main_response(body: dict, logger, client, ack, context: dict) -> None: - """Central dispatcher for every Slack request. + """Central dispatcher for every Slack request (lazy work phase in production). + + In production, ``view_submission`` HTTP ack is sent by :func:`view_ack` first; + this function runs afterward and must not call ``ack()`` again for views. 
- For most requests, acknowledges immediately (required by Slack's 3-second - timeout). Certain ``view_submission`` handlers defer the ack so they can - return ``response_action`` (see :data:`_DEFERRED_ACK_VIEWS`). + In local development, view ack + work run in one invocation: deferred views + call the ack handler from :data:`~routing.VIEW_ACK_MAPPER`, then the work handler. A unique correlation ID is assigned to every incoming request and attached to all log entries emitted while processing it. @@ -126,19 +155,18 @@ def main_response(body: dict, logger, client, ack, context: dict) -> None: set_correlation_id() request_type, request_id = get_request_type(body) - # Most requests are acked immediately. Certain view_submission - # handlers need to control the ack themselves (e.g. to respond with - # response_action="update" for multi-step modals). For those, we - # defer the ack and expose it via context["ack"]. - defer_ack = request_type == "view_submission" and request_id in _DEFERRED_ACK_VIEWS - ack_called = False - - if defer_ack: - def _tracked_ack(*args, **kwargs): - nonlocal ack_called - ack_called = True - return ack(*args, **kwargs) - context["ack"] = _tracked_ack + if request_type == "view_submission": + if LOCAL_DEVELOPMENT: + ack_handler = VIEW_ACK_MAPPER.get(request_id) + if ack_handler: + result = ack_handler(body, client, context) + if isinstance(result, dict): + ack(**result) + else: + ack() + else: + ack() + # Production: ack already sent by view_ack else: ack() @@ -155,19 +183,7 @@ def _tracked_ack(*args, **kwargs): run_function = MAIN_MAPPER.get(request_type, {}).get(request_id) if run_function: try: - result = run_function(body, client, logger, context) - if defer_ack and not ack_called: - if isinstance(result, dict): - ack(**result) - else: - _logger.warning( - "deferred_view_ack_fallback", - extra={ - "request_id": request_id, - "view_callback_id": safe_get(body, "view", "callback_id"), - }, - ) - ack() + run_function(body, client, logger, 
context) emit_metric( "request_handled", duration_ms=round(get_request_duration_ms(), 1), @@ -175,16 +191,6 @@ def _tracked_ack(*args, **kwargs): request_id=request_id, ) except Exception: - if defer_ack and not ack_called: - _logger.warning( - "deferred_view_ack_fallback", - extra={ - "request_id": request_id, - "view_callback_id": safe_get(body, "view", "callback_id"), - "reason": "exception", - }, - ) - ack() emit_metric( "request_error", request_type=request_type, @@ -192,15 +198,18 @@ def _tracked_ack(*args, **kwargs): ) raise else: - if defer_ack and not ack_called: - ack() - _logger.error( - "no_handler", - extra={ - "request_type": request_type, - "request_id": request_id, - }, - ) + if not ( + request_type == "view_submission" + and request_id in VIEW_ACK_MAPPER + and request_id not in VIEW_MAPPER + ): + _logger.error( + "no_handler", + extra={ + "request_type": request_type, + "request_id": request_id, + }, + ) if LOCAL_DEVELOPMENT: @@ -216,10 +225,10 @@ def _tracked_ack(*args, **kwargs): MATCH_ALL_PATTERN = re.compile(".*") app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) -# View submissions must run synchronously so deferred ack (response_action: -# update / errors / push) is returned in the same HTTP response to Slack. -# Lazy listeners run after the default ack is sent, which breaks multi-step modals. 
-app.view(MATCH_ALL_PATTERN)(main_response) +if LOCAL_DEVELOPMENT: + app.view(MATCH_ALL_PATTERN)(main_response) +else: + app.view(MATCH_ALL_PATTERN)(ack=view_ack, lazy=[main_response]) if __name__ == "__main__": diff --git a/syncbot/constants.py b/syncbot/constants.py index c7ec5e5..29394e9 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -174,9 +174,7 @@ def _encryption_active() -> bool: key = (os.environ.get(TOKEN_ENCRYPTION_KEY) or "").strip() if not key or len(key) < _TOKEN_ENCRYPTION_KEY_MIN_LEN: return False - if key.lower() in _TOKEN_ENCRYPTION_KEY_PLACEHOLDERS: - return False - return True + return key.lower() not in _TOKEN_ENCRYPTION_KEY_PLACEHOLDERS def validate_config() -> None: diff --git a/syncbot/db/alembic/env.py b/syncbot/db/alembic/env.py index 155e952..0ff5e25 100644 --- a/syncbot/db/alembic/env.py +++ b/syncbot/db/alembic/env.py @@ -21,12 +21,11 @@ except ImportError: pass -from logging.config import fileConfig +from logging.config import fileConfig # noqa: E402 -from alembic import context -from sqlalchemy import engine_from_config, pool +from alembic import context # noqa: E402 -from db import get_engine +from db import get_engine # noqa: E402 config = context.config if config.config_file_name is not None: diff --git a/syncbot/db/alembic/versions/001_baseline.py b/syncbot/db/alembic/versions/001_baseline.py index 5645140..eeec36e 100644 --- a/syncbot/db/alembic/versions/001_baseline.py +++ b/syncbot/db/alembic/versions/001_baseline.py @@ -5,17 +5,17 @@ Create Date: Baseline from ORM models + OAuth tables """ -from typing import Sequence, Union +from collections.abc import Sequence -from alembic import op import sqlalchemy as sa +from alembic import op from db.schemas import BaseClass revision: str = "001_baseline" -down_revision: Union[str, None] = None -branch_labels: Union[str, Sequence[str], None] = None -depends_on: Union[str, Sequence[str], None] = None +down_revision: str | None = None +branch_labels: str | Sequence[str] | 
None = None +depends_on: str | Sequence[str] | None = None def upgrade() -> None: diff --git a/syncbot/handlers/__init__.py b/syncbot/handlers/__init__.py index 76ca0f7..0bffebd 100644 --- a/syncbot/handlers/__init__.py +++ b/syncbot/handlers/__init__.py @@ -13,8 +13,9 @@ from handlers.channel_sync import ( handle_pause_sync, handle_publish_channel, - handle_publish_channel_submit, - handle_publish_mode_submit, + handle_publish_channel_submit_ack, + handle_publish_channel_submit_work, + handle_publish_mode_submit_ack, handle_resume_sync, handle_stop_sync, handle_stop_sync_confirm, @@ -26,11 +27,13 @@ handle_backup_download, handle_backup_restore, handle_backup_restore_proceed, - handle_backup_restore_submit, + handle_backup_restore_submit_ack, + handle_backup_restore_submit_work, handle_data_migration, handle_data_migration_export, handle_data_migration_proceed, - handle_data_migration_submit, + handle_data_migration_submit_ack, + handle_data_migration_submit_work, ) from handlers.federation_cmds import ( handle_enter_federation_code, @@ -92,11 +95,13 @@ "handle_backup_download", "handle_backup_restore", "handle_backup_restore_proceed", - "handle_backup_restore_submit", + "handle_backup_restore_submit_ack", + "handle_backup_restore_submit_work", "handle_data_migration", "handle_data_migration_proceed", "handle_data_migration_export", - "handle_data_migration_submit", + "handle_data_migration_submit_ack", + "handle_data_migration_submit_work", "handle_db_reset", "handle_db_reset_proceed", "handle_accept_group_invite", @@ -118,8 +123,9 @@ "handle_new_sync_submission", "handle_pause_sync", "handle_publish_channel", - "handle_publish_channel_submit", - "handle_publish_mode_submit", + "handle_publish_channel_submit_ack", + "handle_publish_channel_submit_work", + "handle_publish_mode_submit_ack", "handle_refresh_home", "handle_remove_federation_connection", "handle_remove_sync", diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index 
8d47d89..fefa1f8 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -182,16 +182,15 @@ def handle_publish_channel( ) -def handle_publish_mode_submit( +def handle_publish_mode_submit_ack( body: dict, client: WebClient, - logger: Logger, context: dict, -) -> None: - """Handle step 1 submission: read the selected sync mode and show step 2.""" +) -> dict | None: + """Ack phase for step 1: read sync mode and return ``response_action=update`` for step 2.""" auth_result = _get_authorized_workspace(body, client, context, "publish_mode_submit") if not auth_result: - return + return None _, workspace_record = auth_result metadata = _parse_private_metadata(body) @@ -206,11 +205,10 @@ def handle_publish_mode_submit( "private_metadata_len": len(raw_pm) if isinstance(raw_pm, str) else None, }, ) - return + return None sync_mode = _get_selected_option_value(body, actions.CONFIG_PUBLISH_SYNC_MODE) or "group" - other_members = [] group_members = _get_group_members(group_id) other_members = [ member for member in group_members if member.workspace_id != workspace_record.id and member.workspace_id @@ -222,20 +220,69 @@ def handle_publish_mode_submit( submit_button_text="Publish", parent_metadata={"group_id": group_id, "sync_mode": sync_mode}, ) - ack_fn = context.get("ack") - if ack_fn: - ack_fn(response_action="update", view=updated_view) - else: - _logger.warning("handle_publish_mode_submit: no ack function in context") + return {"response_action": "update", "view": updated_view} + + +def handle_publish_channel_submit_ack( + body: dict, + client: WebClient, + context: dict, +) -> dict | None: + """Ack phase for publish: validate and close modal (errors) or empty ack (success).""" + auth_result = _get_authorized_workspace(body, client, context, "publish_channel_submit") + if not auth_result: + return None + _, workspace_record = auth_result + + metadata = _parse_private_metadata(body) + group_id = metadata.get("group_id") + + if not group_id: + 
_logger.warning("publish_channel_submit: missing group_id in metadata") + return None + + sync_mode = metadata.get("sync_mode", "group") + target_workspace_id = None + selected_target = _get_selected_option_value(body, actions.CONFIG_PUBLISH_DIRECT_TARGET) + if selected_target: + with contextlib.suppress(TypeError, ValueError): + target_workspace_id = int(selected_target) + + if sync_mode == "direct" and not target_workspace_id: + sync_mode = "group" + + channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_PUBLISH_CHANNEL_SELECT) + + if not channel_id or channel_id == "__none__": + return { + "response_action": "errors", + "errors": {actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a Channel to publish."}, + } + + existing = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + if existing: + return { + "response_action": "errors", + "errors": {actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This Channel is already being synced."}, + } + return None -def handle_publish_channel_submit( + +def handle_publish_channel_submit_work( body: dict, client: WebClient, logger: Logger, context: dict, ) -> None: - """Create a Sync + SyncChannel for the publisher's channel, scoped to a group.""" + """Lazy work phase: create Sync + SyncChannel after modal closed.""" auth_result = _get_authorized_workspace(body, client, context, "publish_channel_submit") if not auth_result: return @@ -245,7 +292,6 @@ def handle_publish_channel_submit( group_id = metadata.get("group_id") if not group_id: - _logger.warning("publish_channel_submit: missing group_id in metadata") return sync_mode = metadata.get("sync_mode", "group") @@ -258,16 +304,9 @@ def handle_publish_channel_submit( if sync_mode == "direct" and not target_workspace_id: sync_mode = "group" - ack_fn = context.get("ack") - channel_id = 
_get_selected_conversation_or_option(body, actions.CONFIG_PUBLISH_CHANNEL_SELECT) if not channel_id or channel_id == "__none__": - if ack_fn: - ack_fn( - response_action="errors", - errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a Channel to publish."}, - ) return existing = DbManager.find_records( @@ -279,21 +318,13 @@ def handle_publish_channel_submit( ], ) if existing: - if ack_fn: - ack_fn( - response_action="errors", - errors={actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This Channel is already being synced."}, - ) return - if ack_fn: - ack_fn() - try: conv_info = client.conversations_info(channel=channel_id) channel_name = helpers.safe_get(conv_info, "channel", "name") or channel_id except Exception as exc: - _logger.debug(f"handle_publish_channel_submit: conversations_info failed for {channel_id}: {exc}") + _logger.debug(f"handle_publish_channel_submit_work: conversations_info failed for {channel_id}: {exc}") channel_name = channel_id try: @@ -330,7 +361,6 @@ def handle_publish_channel_submit( except Exception as e: _logger.error(f"Failed to publish channel {channel_id}: {e}") - # Refresh Home for all admins in current workspace, then other group members builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context) diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index e31e8ed..2d8bf3d 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -17,6 +17,44 @@ _logger = logging.getLogger(__name__) +# Uploaded JSON (backup / migration) download limits — matches interaction-time budget. +_UPLOAD_DOWNLOAD_TIMEOUT = 10 +_MAX_IMPORT_BYTES = 50 * 1024 * 1024 # 50 MiB + + +def _download_uploaded_file(file_url: str, token: str) -> tuple[str | None, str | None]: + """Download a Slack-hosted uploaded file. 
Returns ``(utf8_text, None)`` or ``(None, error_message)``.""" + import urllib.error + import urllib.request + + req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {token}"}) + try: + with urllib.request.urlopen(req, timeout=_UPLOAD_DOWNLOAD_TIMEOUT) as resp: + chunks: list[bytes] = [] + total = 0 + while True: + chunk = resp.read(65536) + if not chunk: + break + total += len(chunk) + if total > _MAX_IMPORT_BYTES: + return None, "Uploaded file exceeds maximum size (50 MB)." + chunks.append(chunk) + raw = b"".join(chunks) + except urllib.error.HTTPError as e: + _logger.exception("upload download HTTP error: %s", e) + return None, "Failed to download the uploaded file." + except TimeoutError as e: + _logger.exception("upload download timed out: %s", e) + return None, "Failed to download the uploaded file." + except OSError as e: + _logger.exception("upload download failed: %s", e) + return None, "Failed to download the uploaded file." + try: + return raw.decode("utf-8"), None + except UnicodeDecodeError as e: + return None, f"Invalid encoding in uploaded file: {e}" + def _is_admin(client: WebClient, user_id: str, body: dict) -> bool: return helpers.is_user_authorized(client, user_id) @@ -140,13 +178,12 @@ def handle_backup_download( ) -def handle_backup_restore_submit( +def handle_backup_restore_submit_ack( body: dict, client: WebClient, - logger: Logger, context: dict, ) -> dict | None: - """Process restore submission. 
Returns response dict with errors or None to close.""" + """Ack phase: validate upload; return errors, push confirm modal, or ``None`` to close.""" user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return None @@ -171,17 +208,11 @@ def handle_backup_restore_submit( "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Could not retrieve the uploaded file."}, } - try: - import urllib.request - - req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {client.token}"}) - with urllib.request.urlopen(req) as resp: - json_text = resp.read().decode("utf-8") - except Exception as e: - _logger.exception("backup_restore: failed to download uploaded file: %s", e) + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: return { "response_action": "errors", - "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Failed to download the uploaded file."}, + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: dl_err}, } try: @@ -203,7 +234,6 @@ def handle_backup_restore_submit( hmac_ok = ei.verify_backup_hmac(data) key_ok = ei.verify_backup_encryption_key(data) - # If warnings needed, store payload in cache and show confirmation modal if not hmac_ok or not key_ok: from helpers._cache import _cache_set @@ -251,11 +281,53 @@ def handle_backup_restore_submit( }, } - context["ack"]() - _do_restore(data, client, user_id) return None +def handle_backup_restore_submit_work( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Lazy work phase: run restore after modal closed (happy path).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + file_data = helpers.safe_get( + values, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT + ) + files = 
file_data.get("files") if file_data else None + if not files: + return + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return + + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: + return + + try: + data = json.loads(json_text) + except json.JSONDecodeError: + return + + if data.get("version") != ei.BACKUP_VERSION: + return + + hmac_ok = ei.verify_backup_hmac(data) + key_ok = ei.verify_backup_encryption_key(data) + if not hmac_ok or not key_ok: + return + + _do_restore(data, client, user_id) + + def handle_backup_restore_proceed( body: dict, client: WebClient, @@ -393,19 +465,21 @@ def handle_data_migration_export( _logger.exception("data_migration_export failed: %s", e) -def handle_data_migration_submit( +def _data_migration_prepare( body: dict, client: WebClient, - logger: Logger, context: dict, -) -> dict | None: - """Process migration import submission.""" +) -> tuple[dict | None, dict | None, int | None, dict[str, int] | None, object | None]: + """Shared validation for migration ack/work. + + Returns ``(error_ack_dict, data, group_id, team_id_to_workspace_id, workspace_record)``. 
+ """ if not constants.FEDERATION_ENABLED: - return None + return None, None, None, None, None user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") if not _is_admin(client, user_id, body): - return None + return None, None, None, None, None values = helpers.safe_get(body, "view", "state", "values") or {} file_data = helpers.safe_get( @@ -414,73 +488,107 @@ def handle_data_migration_submit( files = file_data.get("files") if file_data else None if not files: - return { - "response_action": "errors", - "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Upload a migration JSON file to import."}, - } + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Upload a migration JSON file to import."}, + }, + None, + None, + None, + None, + ) file_info = files[0] file_url = file_info.get("url_private_download") or file_info.get("url_private") if not file_url: - return { - "response_action": "errors", - "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Could not retrieve the uploaded file."}, - } - - try: - import urllib.request + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Could not retrieve the uploaded file."}, + }, + None, + None, + None, + None, + ) - req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {client.token}"}) - with urllib.request.urlopen(req) as resp: - json_text = resp.read().decode("utf-8") - except Exception as e: - _logger.exception("data_migration_submit: failed to download uploaded file: %s", e) - return { - "response_action": "errors", - "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Failed to download the uploaded file."}, - } + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: + return ( + { + "response_action": "errors", + "errors": 
{actions.CONFIG_DATA_MIGRATION_JSON_INPUT: dl_err}, + }, + None, + None, + None, + None, + ) try: data = json.loads(json_text) except json.JSONDecodeError as e: - return { - "response_action": "errors", - "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON in uploaded file: {e}"}, - } + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON in uploaded file: {e}"}, + }, + None, + None, + None, + None, + ) if data.get("version") != ei.MIGRATION_VERSION: - return { - "response_action": "errors", - "errors": { - actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})." + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})." + }, }, - } + None, + None, + None, + None, + ) workspace_payload = data.get("workspace", {}) export_team_id = workspace_payload.get("team_id") if not export_team_id: - return { - "response_action": "errors", - "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}, - } + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}, + }, + None, + None, + None, + None, + ) workspace_record = helpers.get_workspace_record(team_id, body, context, client) if not workspace_record or workspace_record.team_id != export_team_id: - return { - "response_action": "errors", - "errors": { - actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration file is for a different workspace. Open the app from the workspace that matches the migration file." + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration file is for a different workspace. Open the app from the workspace that matches the migration file." 
+ }, }, - } + None, + None, + None, + None, + ) - # Build team_id -> workspace_id on B team_id_to_workspace_id = {workspace_record.team_id: workspace_record.id} workspaces_b = DbManager.find_records(schemas.Workspace, [schemas.Workspace.deleted_at.is_(None)]) for w in workspaces_b: if w.team_id: team_id_to_workspace_id[w.team_id] = w.id - # Optional: establish connection if source_instance present source = data.get("source_instance") if source and source.get("connection_code"): import secrets @@ -544,7 +652,6 @@ def handle_data_migration_submit( ) ) - # Resolve federated group (W + connection to source instance) my_groups = helpers.get_groups_for_workspace(workspace_record.id) my_group_ids = {g.id for g, _ in my_groups} fed_members = DbManager.find_records( @@ -558,16 +665,38 @@ def handle_data_migration_submit( candidate_groups = [fm.group_id for fm in fed_members if fm.group_id in my_group_ids] group_id = candidate_groups[0] if candidate_groups else None if not group_id: - return { - "response_action": "errors", - "errors": { - actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import." + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import." 
+ }, }, - } + None, + None, + None, + None, + ) + + return None, data, group_id, team_id_to_workspace_id, workspace_record + +def handle_data_migration_submit_ack( + body: dict, + client: WebClient, + context: dict, +) -> dict | None: + """Ack phase: validate; return errors, push confirm, or ``None`` to close before lazy import.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + err, data, group_id, team_id_to_workspace_id, workspace_record = _data_migration_prepare(body, client, context) + if err is not None: + return err + if data is None or group_id is None or team_id_to_workspace_id is None or workspace_record is None: + return None + + source = data.get("source_instance") sig_ok = ei.verify_migration_signature(data) if not sig_ok and source: - # Store in cache and show confirmation modal (private_metadata size limit) from helpers._cache import _cache_set cache_key = f"migration_import_pending:{user_id}" @@ -611,7 +740,27 @@ def handle_data_migration_submit( }, } - context["ack"]() + return None + + +def handle_data_migration_submit_work( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Lazy work phase: import migration data after modal closed.""" + if not constants.FEDERATION_ENABLED: + return + err, data, group_id, team_id_to_workspace_id, workspace_record = _data_migration_prepare(body, client, context) + if err is not None or data is None or group_id is None or team_id_to_workspace_id is None or workspace_record is None: + return + + source = data.get("source_instance") + sig_ok = ei.verify_migration_signature(data) + if not sig_ok and source: + return + ei.import_migration_data( data, workspace_record.id, @@ -619,7 +768,6 @@ def handle_data_migration_submit( team_id_to_workspace_id=team_id_to_workspace_id, ) ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) - return None def handle_data_migration_proceed( diff --git a/syncbot/routing.py b/syncbot/routing.py index 
51d4ed6..de4f5db 100644 --- a/syncbot/routing.py +++ b/syncbot/routing.py @@ -6,6 +6,8 @@ the specific identifier (action ID, event type, or callback ID). :func:`~app.main_response` uses these tables to dispatch every request. +:data:`VIEW_ACK_MAPPER` lists view submission callback IDs handled by the fast ack +path in :mod:`app` (``view_ack``) before lazy work runs in :func:`~app.main_response`. """ import builders @@ -69,16 +71,23 @@ actions.CONFIG_JOIN_GROUP_SUBMIT: handlers.handle_join_group_submit, actions.CONFIG_INVITE_WORKSPACE_SUBMIT: handlers.handle_invite_workspace_submit, actions.CONFIG_LEAVE_GROUP_CONFIRM: handlers.handle_leave_group_confirm, - actions.CONFIG_PUBLISH_MODE_SUBMIT: handlers.handle_publish_mode_submit, - actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit, + actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit_work, actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT: handlers.handle_subscribe_channel_submit, actions.CONFIG_STOP_SYNC_CONFIRM: handlers.handle_stop_sync_confirm, actions.CONFIG_FEDERATION_CODE_SUBMIT: handlers.handle_federation_code_submit, actions.CONFIG_FEDERATION_LABEL_SUBMIT: handlers.handle_federation_label_submit, - actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit, - actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit, + actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit_work, + actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit_work, } -"""View submission ``callback_id`` -> handler.""" +"""View submission ``callback_id`` -> lazy work handler (after HTTP ack).""" + +VIEW_ACK_MAPPER = { + actions.CONFIG_PUBLISH_MODE_SUBMIT: handlers.handle_publish_mode_submit_ack, + actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit_ack, + actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit_ack, + actions.CONFIG_DATA_MIGRATION_SUBMIT: 
handlers.handle_data_migration_submit_ack, +} +"""Deferred-ack view submissions: fast ack handler (``dict`` or ``None`` for Slack ``ack()``).""" MAIN_MAPPER = { "block_actions": ACTION_MAPPER, diff --git a/tests/test_app_main_response.py b/tests/test_app_main_response.py index 5662886..359379b 100644 --- a/tests/test_app_main_response.py +++ b/tests/test_app_main_response.py @@ -1,4 +1,4 @@ -"""Unit tests for syncbot.app.main_response ack semantics (deferred vs immediate).""" +"""Unit tests for syncbot.app.view_ack and main_response (ack + lazy work).""" import os from unittest.mock import MagicMock, patch @@ -21,25 +21,22 @@ def _body_view_submit(callback_id: str) -> dict: } -class TestMainResponseDeferredAck: - """Deferred ack views must receive ack(**result) or context['ack'](...) in the same dispatch.""" +class TestViewAck: + """Production ``view_ack``: deferred views get custom ack kwargs.""" def test_returns_dict_uses_ack_kwargs(self): ack = MagicMock() context: dict = {} - def handler(b, c, log, ctx): + def ack_handler(b, c, ctx): return { "response_action": "errors", "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "bad"}, } - custom = {actions.CONFIG_BACKUP_RESTORE_SUBMIT: handler} - with ( - patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), - patch.object(app_module, "emit_metric"), - ): - app_module.main_response( + custom = {actions.CONFIG_BACKUP_RESTORE_SUBMIT: ack_handler} + with patch.object(app_module, "VIEW_ACK_MAPPER", custom): + app_module.view_ack( _body_view_submit(actions.CONFIG_BACKUP_RESTORE_SUBMIT), MagicMock(), MagicMock(), @@ -51,19 +48,16 @@ def handler(b, c, log, ctx): assert ack.call_args.kwargs["response_action"] == "errors" assert "errors" in ack.call_args.kwargs - def test_context_ack_called_skips_fallback(self): + def test_returns_none_calls_empty_ack(self): ack = MagicMock() context: dict = {} - def handler(b, c, log, ctx): - ctx["ack"](response_action="update", view={"type": "modal", "callback_id": "x"}) + 
def ack_handler(b, c, ctx): + return None - custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: handler} - with ( - patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), - patch.object(app_module, "emit_metric"), - ): - app_module.main_response( + custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: ack_handler} + with patch.object(app_module, "VIEW_ACK_MAPPER", custom): + app_module.view_ack( _body_view_submit(actions.CONFIG_PUBLISH_MODE_SUBMIT), MagicMock(), MagicMock(), @@ -71,45 +65,48 @@ def handler(b, c, log, ctx): context, ) - ack.assert_called_once_with(response_action="update", view={"type": "modal", "callback_id": "x"}) + ack.assert_called_once_with() + + def test_unknown_callback_calls_empty_ack(self): + ack = MagicMock() + context: dict = {} + with patch.object(app_module, "VIEW_ACK_MAPPER", {}): + app_module.view_ack(_body_view_submit("unknown_callback"), MagicMock(), MagicMock(), ack, context) + ack.assert_called_once_with() + + +class TestMainResponseLocalDevViewSubmission: + """With LOCAL_DEVELOPMENT, main_response runs ack + work in one call.""" - def test_no_ack_no_dict_logs_warning_and_calls_empty_ack(self): + @patch.object(app_module, "LOCAL_DEVELOPMENT", True) + def test_non_deferred_ack_before_handler(self): ack = MagicMock() context: dict = {} def handler(b, c, log, ctx): + assert ack.call_count == 1 return None - custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: handler} + cid = actions.CONFIG_NEW_SYNC_SUBMIT + custom = {cid: handler} with ( patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), patch.object(app_module, "emit_metric"), - patch.object(app_module, "_logger") as mock_log, ): - app_module.main_response( - _body_view_submit(actions.CONFIG_PUBLISH_MODE_SUBMIT), - MagicMock(), - MagicMock(), - ack, - context, - ) + app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) - warn_text = " ".join( - str(c.args[0]) if c.args else "" for c in mock_log.warning.call_args_list - ) - assert 
"deferred_view_ack_fallback" in warn_text ack.assert_called_once_with() -class TestMainResponseImmediateAck: - """Non-deferred view_submission: ack() runs before handler.""" +class TestMainResponseProdViewSubmission: + """Production main_response (lazy): does not call ack for view_submission.""" - def test_non_deferred_ack_before_handler(self): + @patch.object(app_module, "LOCAL_DEVELOPMENT", False) + def test_view_submission_skips_ack_in_main_response(self): ack = MagicMock() context: dict = {} def handler(b, c, log, ctx): - assert ack.call_count == 1 return None cid = actions.CONFIG_NEW_SYNC_SUBMIT @@ -120,4 +117,4 @@ def handler(b, c, log, ctx): ): app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) - ack.assert_called_once_with() + ack.assert_not_called() diff --git a/tests/test_app_registration.py b/tests/test_app_registration.py index eecccc7..1d111cc 100644 --- a/tests/test_app_registration.py +++ b/tests/test_app_registration.py @@ -1,29 +1,34 @@ -"""Guardrails: Slack Bolt listener wiring for deferred view submissions (sync app.view).""" +"""Guardrails: Slack Bolt listener wiring for view ack + lazy main_response.""" from pathlib import Path -def test_app_py_registers_view_listener_synchronously(): - """app.view must call main_response directly so view_submission ack reaches Slack (not lazy).""" +def test_app_py_view_listener_has_ack_and_lazy_in_prod_branch(): + """Production registers view with view_ack + lazy main_response.""" root = Path(__file__).resolve().parents[1] app_py = root / "syncbot" / "app.py" text = app_py.read_text(encoding="utf-8") - assert "app.view(MATCH_ALL_PATTERN)(main_response)" in text + assert "ack=view_ack" in text + assert "lazy=[main_response]" in text assert "app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text assert "app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text -def test_bolt_view_listener_has_no_lazy_functions(): - """Bolt CustomListener for app.view should use 
main_response as ack only (no lazy split).""" +def test_bolt_view_listener_uses_view_ack_when_not_local_dev(): + """Bolt view listener should use view_ack as ack_function when not LOCAL_DEVELOPMENT.""" import app as app_module + if app_module.LOCAL_DEVELOPMENT: + return bolt_app = app_module.app - sync_main = [ + view_ack_listeners = [ li for li in bolt_app._listeners - if getattr(li.ack_function, "__name__", None) == "main_response" and len(li.lazy_functions) == 0 + if getattr(li.ack_function, "__name__", None) == "view_ack" + and li.lazy_functions + and any(getattr(f, "__name__", None) == "main_response" for f in li.lazy_functions) ] - assert sync_main, "expected at least one listener with ack_function=main_response and empty lazy_functions" + assert view_ack_listeners, "expected view listener with ack_function=view_ack and lazy main_response" def test_bolt_event_or_action_uses_lazy_main_response_in_prod_mode(): diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py index 323b9b5..f70c2c6 100644 --- a/tests/test_channel_sync_handlers.py +++ b/tests/test_channel_sync_handlers.py @@ -11,17 +11,16 @@ os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") from handlers.channel_sync import ( # noqa: E402 - handle_publish_channel_submit, - handle_publish_mode_submit, + handle_publish_channel_submit_ack, + handle_publish_mode_submit_ack, handle_subscribe_channel_submit, ) -class TestPublishModeSubmit: +class TestPublishModeSubmitAck: def test_missing_group_id_logs_warning(self): client = MagicMock() - logger = MagicMock() - context = {"ack": MagicMock()} + context = {} workspace = SimpleNamespace(id=10) body = {"view": {"team_id": "T1", "private_metadata": "{}"}} @@ -30,18 +29,17 @@ def test_missing_group_id_logs_warning(self): patch("handlers.channel_sync._parse_private_metadata", return_value={}), patch("handlers.channel_sync._logger.warning") as warn_log, ): - handle_publish_mode_submit(body, client, logger, context) + result = 
handle_publish_mode_submit_ack(body, client, context) + assert result is None assert warn_log.call_args is not None assert "publish_mode_submit: missing group_id in metadata" in warn_log.call_args.args[0] - context["ack"].assert_not_called() -class TestPublishChannelSubmit: +class TestPublishChannelSubmitAck: def test_missing_group_id_exits_early(self): client = MagicMock() - logger = MagicMock() - context = {"ack": MagicMock()} + context = {} workspace = SimpleNamespace(id=10) with ( @@ -49,16 +47,14 @@ def test_missing_group_id_exits_early(self): patch("handlers.channel_sync._parse_private_metadata", return_value={}), patch("handlers.channel_sync.DbManager.create_record") as create_record, ): - handle_publish_channel_submit({}, client, logger, context) + result = handle_publish_channel_submit_ack({}, client, context) - context["ack"].assert_not_called() + assert result is None create_record.assert_not_called() def test_missing_channel_selection_returns_ack_error(self): client = MagicMock() - logger = MagicMock() - ack = MagicMock() - context = {"ack": ack} + context = {} workspace = SimpleNamespace(id=10) with ( @@ -67,19 +63,16 @@ def test_missing_channel_selection_returns_ack_error(self): patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="__none__"), patch("handlers.channel_sync.DbManager.create_record") as create_record, ): - handle_publish_channel_submit({}, client, logger, context) + result = handle_publish_channel_submit_ack({}, client, context) - ack.assert_called_once() - kwargs = ack.call_args.kwargs - assert kwargs["response_action"] == "errors" - assert "Select a Channel to publish." in kwargs["errors"].values() + assert result is not None + assert result["response_action"] == "errors" + assert "Select a Channel to publish." 
in result["errors"].values() create_record.assert_not_called() def test_existing_sync_channel_returns_ack_error(self): client = MagicMock() - logger = MagicMock() - ack = MagicMock() - context = {"ack": ack} + context = {} workspace = SimpleNamespace(id=10) with ( @@ -89,12 +82,11 @@ def test_existing_sync_channel_returns_ack_error(self): patch("handlers.channel_sync.DbManager.find_records", return_value=[object()]), patch("handlers.channel_sync.DbManager.create_record") as create_record, ): - handle_publish_channel_submit({}, client, logger, context) + result = handle_publish_channel_submit_ack({}, client, context) - ack.assert_called_once() - kwargs = ack.call_args.kwargs - assert kwargs["response_action"] == "errors" - assert "already being synced" in next(iter(kwargs["errors"].values())) + assert result is not None + assert result["response_action"] == "errors" + assert "already being synced" in next(iter(result["errors"].values())) create_record.assert_not_called() diff --git a/tests/test_db.py b/tests/test_db.py index 3602fef..e7d09af 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -1,5 +1,6 @@ """Unit tests for ``syncbot/db`` connection pooling, retry logic, and backend parity (MySQL/SQLite).""" +import contextlib import os from unittest.mock import patch @@ -197,10 +198,8 @@ def test_sqlite_initialize_database_creates_tables(self, sqlite_url): db_mod.GLOBAL_ENGINE = old_engine db_mod.GLOBAL_SCHEMA = old_schema if "DATABASE_URL" in os.environ and "test_bootstrap" in os.environ["DATABASE_URL"]: - try: + with contextlib.suppress(Exception): (__import__("pathlib").Path("test_bootstrap.db")).unlink(missing_ok=True) - except Exception: - pass def test_get_required_db_vars_mysql_without_url(self): with patch.dict(os.environ, {"DATABASE_BACKEND": "mysql"}, clear=False): diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py index e51b479..9bf794f 100644 --- a/tests/test_db_setup.py +++ b/tests/test_db_setup.py @@ -1,9 +1,29 @@ """Unit tests for 
infra/aws/db_setup/handler.py (MySQL vs PostgreSQL branches).""" +import importlib +import sys from unittest.mock import MagicMock, patch import pytest +# handler.py does ``import psycopg2`` at the top level. The package +# (psycopg2-binary) may not ship wheels for every Python version +# (e.g. 3.14). Stub the module so the import succeeds regardless. +if "psycopg2" not in sys.modules: + _pg_stub = MagicMock() + _pg_stub.sql = MagicMock() + sys.modules["psycopg2"] = _pg_stub + sys.modules["psycopg2.sql"] = _pg_stub.sql + + +def _fresh_handler(): + """(Re-)import handler so patches take effect.""" + if "handler" in sys.modules: + return importlib.reload(sys.modules["handler"]) + import handler + + return handler + @pytest.fixture def cfn_create_event(): @@ -26,14 +46,14 @@ def cfn_create_event(): def test_handler_calls_mysql_setup(cfn_create_event): + handler = _fresh_handler() with ( - patch("handler.send") as mock_send, - patch("handler.get_app_password", return_value="apppw"), - patch("handler.setup_database_mysql") as mock_mysql, - patch("handler.setup_database_postgresql") as mock_pg, + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, ): - import handler - handler._handler_impl(cfn_create_event, MagicMock()) mock_mysql.assert_called_once() mock_pg.assert_not_called() @@ -50,9 +70,8 @@ def test_handler_delete_uses_physical_resource_id(): "LogicalResourceId": "AppDbSetup", "PhysicalResourceId": "syncbot_test", } - with patch("handler.send") as mock_send: - import handler - + handler = _fresh_handler() + with patch.object(handler, "send") as mock_send: handler._handler_impl(delete_event, MagicMock()) mock_send.assert_called_once() assert mock_send.call_args[0][2] == "SUCCESS" @@ -61,14 +80,14 @@ def 
test_handler_delete_uses_physical_resource_id(): def test_handler_calls_postgresql_setup(cfn_create_event): cfn_create_event["ResourceProperties"]["DatabaseEngine"] = "postgresql" + handler = _fresh_handler() with ( - patch("handler.send") as mock_send, - patch("handler.get_app_password", return_value="apppw"), - patch("handler.setup_database_mysql") as mock_mysql, - patch("handler.setup_database_postgresql") as mock_pg, + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, ): - import handler - handler._handler_impl(cfn_create_event, MagicMock()) mock_pg.assert_called_once() mock_mysql.assert_not_called() diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py index ac39edd..cd25d56 100644 --- a/tests/test_export_import_handlers.py +++ b/tests/test_export_import_handlers.py @@ -9,25 +9,23 @@ os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") -from handlers.export_import import handle_backup_restore_submit # noqa: E402 +from handlers.export_import import handle_backup_restore_submit_ack # noqa: E402 from slack import actions # noqa: E402 class TestBackupRestoreSubmitValidation: def test_returns_error_when_file_missing(self): client = MagicMock() - logger = MagicMock() body = {"user": {"id": "U1"}, "view": {"state": {"values": {}}}} with patch("handlers.export_import._is_admin", return_value=True): - resp = handle_backup_restore_submit(body, client, logger, context={}) + resp = handle_backup_restore_submit_ack(body, client, context={}) assert resp["response_action"] == "errors" assert actions.CONFIG_BACKUP_RESTORE_JSON_INPUT in resp["errors"] def test_returns_error_when_uploaded_file_has_no_url(self): client = MagicMock() - logger = MagicMock() 
body = { "user": {"id": "U1"}, "view": { @@ -44,7 +42,7 @@ def test_returns_error_when_uploaded_file_has_no_url(self): } with patch("handlers.export_import._is_admin", return_value=True): - resp = handle_backup_restore_submit(body, client, logger, context={}) + resp = handle_backup_restore_submit_ack(body, client, context={}) assert resp["response_action"] == "errors" assert "Could not retrieve the uploaded file." in resp["errors"][actions.CONFIG_BACKUP_RESTORE_JSON_INPUT] diff --git a/tests/test_routing_deferred_ack.py b/tests/test_routing_deferred_ack.py index 1ac6a40..c024888 100644 --- a/tests/test_routing_deferred_ack.py +++ b/tests/test_routing_deferred_ack.py @@ -1,12 +1,24 @@ -"""Invariant: deferred-ack view callback IDs stay registered in VIEW_MAPPER.""" +"""Invariant: deferred-ack view callback IDs stay registered in VIEW_ACK_MAPPER / VIEW_MAPPER.""" -from routing import VIEW_MAPPER +from routing import VIEW_ACK_MAPPER, VIEW_MAPPER +from slack import actions from slack.deferred_ack_views import DEFERRED_ACK_VIEW_CALLBACK_IDS -def test_deferred_ack_views_are_routed(): +def test_deferred_ack_matches_view_ack_mapper(): + assert frozenset(VIEW_ACK_MAPPER.keys()) == DEFERRED_ACK_VIEW_CALLBACK_IDS + + +def test_publish_mode_is_ack_only_not_in_work_mapper(): + assert actions.CONFIG_PUBLISH_MODE_SUBMIT in VIEW_ACK_MAPPER + assert actions.CONFIG_PUBLISH_MODE_SUBMIT not in VIEW_MAPPER + + +def test_deferred_work_views_have_work_handlers(): for callback_id in DEFERRED_ACK_VIEW_CALLBACK_IDS: - assert callback_id in VIEW_MAPPER, f"missing VIEW_MAPPER entry for deferred view {callback_id!r}" + if callback_id == actions.CONFIG_PUBLISH_MODE_SUBMIT: + continue + assert callback_id in VIEW_MAPPER, f"missing VIEW_MAPPER work entry for {callback_id!r}" def test_deferred_ack_set_is_nonempty(): From 2b00cac476d95f63e1fef1481f2da2a9beb0713f Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 13:17:34 -0500 Subject: [PATCH 28/45] Poetry update for requests 
package. --- poetry.lock | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2b0a970..b3bc2e2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -790,25 +790,26 @@ cli = ["click (>=5.0)"] [[package]] name = "requests" -version = "2.32.5" +version = "2.33.0" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" groups = ["main"] files = [ - {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, - {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, + {file = "requests-2.33.0-py3-none-any.whl", hash = "sha256:3324635456fa185245e24865e810cecec7b4caf933d7eb133dcde67d48cee69b"}, + {file = "requests-2.33.0.tar.gz", hash = "sha256:c7ebc5e8b0f21837386ad0e1c8fe8b829fa5f544d8df3b2253bff14ef29d7652"}, ] [package.dependencies] -certifi = ">=2017.4.17" +certifi = ">=2023.5.7" charset_normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" +urllib3 = ">=1.26,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +test = ["PySocks (>=1.5.6,!=1.5.7)", "pytest (>=3)", "pytest-cov", "pytest-httpbin (==2.1.0)", "pytest-mock", "pytest-xdist"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<8)"] [[package]] name = "s3transfer" From 8d8f2c43cd2fc56dd56801989b5992dd7e0ddecf Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 13:28:05 -0500 Subject: [PATCH 29/45] Update to packages and GitHub Dependabot. 
--- .github/dependabot.yml | 5 ++++- .github/workflows/ci.yml | 20 ++++++++++++++++++++ README.md | 7 +++++++ pyproject.toml | 3 +++ syncbot/requirements.txt | 25 ++++++++++++++----------- 5 files changed, 48 insertions(+), 12 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index b3283b5..e644f37 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,10 +1,13 @@ version: 2 updates: - package-ecosystem: "pip" - directory: "/" + directory: "/syncbot" schedule: interval: "weekly" open-pull-requests-limit: 10 + groups: + minor-and-patch: + update-types: ["minor", "patch"] - package-ecosystem: "pip" directory: "/infra/aws/db_setup" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 163b8c1..6e5b739 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,6 +11,26 @@ concurrency: cancel-in-progress: true jobs: + requirements-sync: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install Poetry and export plugin + run: | + python -m pip install --upgrade pip + pip install poetry + poetry self add poetry-plugin-export + - name: Check requirements.txt is in sync with poetry.lock + run: | + poetry export -f requirements.txt --without-hashes -o /tmp/expected.txt + diff -u syncbot/requirements.txt /tmp/expected.txt || { + echo "::error::syncbot/requirements.txt is out of sync with poetry.lock. Run: poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt (Poetry 2.x: poetry self add poetry-plugin-export first)" + exit 1 + } + sam-lint: runs-on: ubuntu-latest steps: diff --git a/README.md b/README.md index 4746e56..d4e443d 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,13 @@ App on port **3000**; restart the `app` service after code changes. **Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. 
See `.env.example` and [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md). +After `poetry add` / `poetry update`, regenerate the pinned file used by the Docker image and `pip-audit` in CI so it matches `poetry.lock`: + +```bash +poetry self add poetry-plugin-export # Poetry 2.x; once per Poetry install +poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt +``` + --- ## Configuration reference diff --git a/pyproject.toml b/pyproject.toml index 273be2a..7ea7a32 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,6 +5,9 @@ description = "" authors = ["Klint Van Tassel ", "Evan Petzoldt "] readme = "README.md" +[tool.poetry.requires-plugins] +poetry-plugin-export = ">=1.8" + [tool.poetry.dependencies] python = "^3.12" alembic = "^1.13" diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index f40c23b..2d1d7a6 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,16 +1,19 @@ alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" -certifi==2026.1.4 ; python_version >= "3.12" and python_version < "4.0" -cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" -charset-normalizer==3.4.4 ; python_version >= "3.12" and python_version < "4.0" +certifi==2023.7.22 ; python_version >= "3.12" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" +charset-normalizer==3.3.0 ; python_version >= "3.12" and python_version < "4.0" cryptography==46.0.5 ; python_version >= "3.12" and python_version < "4.0" -greenlet==3.1.1 ; python_version >= "3.12" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" -idna==3.11 ; python_version >= "3.12" and python_version < "4.0" -pycparser==2.23 ; python_version >= "3.12" and python_version < 
"4.0" +greenlet==3.0.0 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") +idna==3.4 ; python_version >= "3.12" and python_version < "4.0" +mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0" +markupsafe==3.0.3 ; python_version >= "3.12" and python_version < "4.0" psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" +pycparser==2.21 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" -python-dotenv==1.2.1 ; python_version >= "3.12" and python_version < "4.0" -requests==2.32.5 ; python_version >= "3.12" and python_version < "4.0" +python-dotenv==1.2.2 ; python_version >= "3.12" and python_version < "4.0" +requests==2.33.0 ; python_version >= "3.12" and python_version < "4.0" slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0" -slack-sdk==3.40.0 ; python_version >= "3.12" and python_version < "4.0" -sqlalchemy==2.0.38 ; python_version >= "3.12" and python_version < "4.0" -urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0" +slack-sdk==3.40.1 ; python_version >= "3.12" and python_version < "4.0" +sqlalchemy==2.0.48 ; python_version >= "3.12" and python_version < "4.0" +typing-extensions==4.15.0 ; python_version >= "3.12" and python_version < "4.0" +urllib3==1.26.17 ; python_version >= "3.12" and python_version < "4.0" From d69b380c8c07c563bc683aee9b21157598f3f47d Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 13:36:42 -0500 Subject: [PATCH 30/45] Poetry updates. 
--- poetry.lock | 470 +++++++++++++++++++++------------------ syncbot/requirements.txt | 14 +- 2 files changed, 259 insertions(+), 225 deletions(-) diff --git a/poetry.lock b/poetry.lock index b3bc2e2..eb0573a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -22,54 +22,54 @@ tz = ["tzdata"] [[package]] name = "boto3" -version = "1.28.60" +version = "1.42.75" description = "The AWS SDK for Python" optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "boto3-1.28.60-py3-none-any.whl", hash = "sha256:d5f270c2c9a051f78c308cbba4268458e8df441057b73ba140742707ac1bc7ea"}, - {file = "boto3-1.28.60.tar.gz", hash = "sha256:dccb49cc10b31314b8553c6c9614c44b2249e0d0285d73f608a5d2010f6e1d82"}, + {file = "boto3-1.42.75-py3-none-any.whl", hash = "sha256:16bc657d16403ee8e11c8b6920c245629e37a36ea60352b919da566f82b4cb4c"}, + {file = "boto3-1.42.75.tar.gz", hash = "sha256:3c7fd95a50c69271bd7707b7eda07dcfddb30e961a392613010f7ee81d91acb3"}, ] [package.dependencies] -botocore = ">=1.31.60,<1.32.0" +botocore = ">=1.42.75,<1.43.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.7.0,<0.8.0" +s3transfer = ">=0.16.0,<0.17.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.31.60" +version = "1.42.75" description = "Low-level, data-driven core of boto 3." 
optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "botocore-1.31.60-py3-none-any.whl", hash = "sha256:b6de7a6a03ca3da18b78615a2cb5221c9fdb9483d3f50cb4281ae038b3f22d9f"}, - {file = "botocore-1.31.60.tar.gz", hash = "sha256:578470a15a5bd64f67437a81f23feccba85084167acf63c56acada2c1c1d95d8"}, + {file = "botocore-1.42.75-py3-none-any.whl", hash = "sha256:915e43b7ac8f50cf3dbc937ba713de5acb999ea48ad8fecd1589d92ad415f787"}, + {file = "botocore-1.42.75.tar.gz", hash = "sha256:95c8e716b6be903ee1601531caa4f50217400aa877c18fe9a2c3047d2945d477"}, ] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = ">=1.25.4,<1.27" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.16.26)"] +crt = ["awscrt (==0.31.2)"] [[package]] name = "certifi" -version = "2023.7.22" +version = "2026.2.25" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" groups = ["main"] files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"}, + {file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"}, ] [[package]] @@ -172,102 +172,141 @@ pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} [[package]] name = "charset-normalizer" -version = "3.3.0" +version = "3.4.6" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" groups = ["main"] files = [ - {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"}, - {file = 
"charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"}, - {file = 
"charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"}, - {file = 
"charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"}, - {file = 
"charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"}, - {file = 
"charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"}, - {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e1d8ca8611099001949d1cdfaefc510cf0f212484fe7c565f735b68c78c3c95"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e25369dc110d58ddf29b949377a93e0716d72a24f62bad72b2b39f155949c1fd"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:259695e2ccc253feb2a016303543d691825e920917e31f894ca1a687982b1de4"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dda86aba335c902b6149a02a55b38e96287157e609200811837678214ba2b1db"}, + {file = 
"charset_normalizer-3.4.6-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51fb3c322c81d20567019778cb5a4a6f2dc1c200b886bc0d636238e364848c89"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:4482481cb0572180b6fd976a4d5c72a30263e98564da68b86ec91f0fe35e8565"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:39f5068d35621da2881271e5c3205125cc456f54e9030d3f723288c873a71bf9"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8bea55c4eef25b0b19a0337dc4e3f9a15b00d569c77211fa8cde38684f234fb7"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f0cdaecd4c953bfae0b6bb64910aaaca5a424ad9c72d85cb88417bb9814f7550"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:150b8ce8e830eb7ccb029ec9ca36022f756986aaaa7956aad6d9ec90089338c0"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e68c14b04827dd76dcbd1aeea9e604e3e4b78322d8faf2f8132c7138efa340a8"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3778fd7d7cd04ae8f54651f4a7a0bd6e39a0cf20f801720a4c21d80e9b7ad6b0"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dad6e0f2e481fffdcf776d10ebee25e0ef89f16d691f1e5dee4b586375fdc64b"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win32.whl", hash = "sha256:74a2e659c7ecbc73562e2a15e05039f1e22c75b7c7618b4b574a3ea9118d1557"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:aa9cccf4a44b9b62d8ba8b4dd06c649ba683e4bf04eea606d2e94cfc2d6ff4d6"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:e985a16ff513596f217cee86c21371b8cd011c0f6f056d0920aa2d926c544058"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-macosx_10_9_universal2.whl", 
hash = "sha256:82060f995ab5003a2d6e0f4ad29065b7672b6593c8c63559beefe5b443242c3e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60c74963d8350241a79cb8feea80e54d518f72c26db618862a8f53e5023deaf9"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6e4333fb15c83f7d1482a76d45a0818897b3d33f00efd215528ff7c51b8e35d"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bc72863f4d9aba2e8fd9085e63548a324ba706d2ea2c83b260da08a59b9482de"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9cc4fc6c196d6a8b76629a70ddfcd4635a6898756e2d9cac5565cf0654605d73"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:0c173ce3a681f309f31b87125fecec7a5d1347261ea11ebbb856fa6006b23c8c"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c907cdc8109f6c619e6254212e794d6548373cc40e1ec75e6e3823d9135d29cc"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:404a1e552cf5b675a87f0651f8b79f5f1e6fd100ee88dc612f89aa16abd4486f"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e3c701e954abf6fc03a49f7c579cc80c2c6cc52525340ca3186c41d3f33482ef"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7a6967aaf043bceabab5412ed6bd6bd26603dae84d5cb75bf8d9a74a4959d398"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5feb91325bbceade6afab43eb3b508c63ee53579fe896c77137ded51c6b6958e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:f820f24b09e3e779fe84c3c456cb4108a7aa639b0d1f02c28046e11bfcd088ed"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b35b200d6a71b9839a46b9b7fff66b6638bb52fc9658aa58796b0326595d3021"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win32.whl", hash = "sha256:9ca4c0b502ab399ef89248a2c84c54954f77a070f28e546a85e91da627d1301e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:a9e68c9d88823b274cf1e72f28cb5dc89c990edf430b0bfd3e2fb0785bfeabf4"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:97d0235baafca5f2b09cf332cc275f021e694e8362c6bb9c96fc9a0eb74fc316"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a"}, + {file = 
"charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf"}, + {file = 
"charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389"}, + {file = 
"charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0"}, + {file = 
"charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = 
"sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:659a1e1b500fac8f2779dd9e1570464e012f43e580371470b45277a27baa7532"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:f61aa92e4aad0be58eb6eb4e0c21acf32cf8065f4b2cae5665da756c4ceef982"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f50498891691e0864dc3da965f340fada0771f6142a378083dc4608f4ea513e2"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bf625105bb9eef28a56a943fec8c8a98aeb80e7d7db99bd3c388137e6eb2d237"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bd9d128ef93637a5d7a6af25363cf5dec3fa21cf80e68055aad627f280e8afa"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_armv7l.whl", hash = "sha256:d08ec48f0a1c48d75d0356cea971921848fb620fdeba805b28f937e90691209f"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ed80ff870ca6de33f4d953fda4d55654b9a2b340ff39ab32fa3adbcd718f264"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f98059e4fcd3e3e4e2d632b7cf81c2faae96c43c60b569e9c621468082f1d104"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:ab30e5e3e706e3063bc6de96b118688cb10396b70bb9864a430f67df98c61ecc"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d5f5d1e9def3405f60e3ca8232d56f35c98fb7bf581efcc60051ebf53cb8b611"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:461598cd852bfa5a61b09cae2b1c02e2efcd166ee5516e243d540ac24bfa68a7"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:71be7e0e01753a89cf024abf7ecb6bca2c81738ead80d43004d9b5e3f1244e64"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:df01808ee470038c3f8dc4f48620df7225c49c2d6639e38f96e6d6ac6e6f7b0e"}, + {file = 
"charset_normalizer-3.4.6-cp38-cp38-win32.whl", hash = "sha256:69dd852c2f0ad631b8b60cfbe25a28c0058a894de5abb566619c205ce0550eae"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-win_amd64.whl", hash = "sha256:517ad0e93394ac532745129ceabdf2696b609ec9f87863d337140317ebce1c14"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31215157227939b4fb3d740cd23fe27be0439afef67b785a1eb78a3ae69cba9e"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecbbd45615a6885fe3240eb9db73b9e62518b611850fdf8ab08bd56de7ad2b17"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c45a03a4c69820a399f1dda9e1d8fbf3562eda46e7720458180302021b08f778"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e8aeb10fcbe92767f0fa69ad5a72deca50d0dca07fbde97848997d778a50c9fe"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54fae94be3d75f3e573c9a1b5402dc593de19377013c9a0e4285e3d402dd3a2a"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:2f7fdd9b6e6c529d6a2501a2d36b240109e78a8ceaef5687cfcfa2bbe671d297"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d1d02209e06550bdaef34af58e041ad71b88e624f5d825519da3a3308e22687"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bc5f0687d796c05b1e28ab0d38a50e6309906ee09375dd3aff6a9c09dd6e8f4"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ee4ec14bc1680d6b0afab9aea2ef27e26d2024f18b24a2d7155a52b60da7e833"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:d1a2ee9c1499fc8f86f4521f27a973c914b211ffa87322f4ee33bb35392da2c5"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:48696db7f18afb80a068821504296eb0787d9ce239b91ca15059d1d3eaacf13b"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4f41da960b196ea355357285ad1316a00099f22d0929fe168343b99b254729c9"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:802168e03fba8bbc5ce0d866d589e4b1ca751d06edee69f7f3a19c5a9fe6b597"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win32.whl", hash = "sha256:8761ac29b6c81574724322a554605608a9960769ea83d2c73e396f3df896ad54"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:1cf0a70018692f85172348fe06d3a4b63f94ecb055e13a00c644d368eb82e5b8"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:3516bbb8d42169de9e61b8520cbeeeb716f12f4ecfe3fd30a9919aa16c806ca8"}, + {file = "charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69"}, + {file = "charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6"}, ] [[package]] @@ -357,93 +396,87 @@ test-randomorder = ["pytest-randomly"] [[package]] name = "greenlet" -version = "3.0.0" +version = "3.3.2" description = "Lightweight in-process concurrent programming" optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" groups = ["main"] markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" files = [ - {file = "greenlet-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e09dea87cc91aea5500262993cbd484b41edf8af74f976719dd83fe724644cd6"}, - {file = 
"greenlet-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47932c434a3c8d3c86d865443fadc1fbf574e9b11d6650b656e602b1797908a"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdfaeecf8cc705d35d8e6de324bf58427d7eafb55f67050d8f28053a3d57118c"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a68d670c8f89ff65c82b936275369e532772eebc027c3be68c6b87ad05ca695"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ad562a104cd41e9d4644f46ea37167b93190c6d5e4048fcc4b80d34ecb278f"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a807b2a58d5cdebb07050efe3d7deaf915468d112dfcf5e426d0564aa3aa4a"}, - {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1660a15a446206c8545edc292ab5c48b91ff732f91b3d3b30d9a915d5ec4779"}, - {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:813720bd57e193391dfe26f4871186cf460848b83df7e23e6bef698a7624b4c9"}, - {file = "greenlet-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:aa15a2ec737cb609ed48902b45c5e4ff6044feb5dcdfcf6fa8482379190330d7"}, - {file = "greenlet-3.0.0-cp310-universal2-macosx_11_0_x86_64.whl", hash = "sha256:7709fd7bb02b31908dc8fd35bfd0a29fc24681d5cc9ac1d64ad07f8d2b7db62f"}, - {file = "greenlet-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:211ef8d174601b80e01436f4e6905aca341b15a566f35a10dd8d1e93f5dbb3b7"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6512592cc49b2c6d9b19fbaa0312124cd4c4c8a90d28473f86f92685cc5fef8e"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:871b0a8835f9e9d461b7fdaa1b57e3492dd45398e87324c047469ce2fc9f516c"}, - {file = 
"greenlet-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b505fcfc26f4148551826a96f7317e02c400665fa0883fe505d4fcaab1dabfdd"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123910c58234a8d40eaab595bc56a5ae49bdd90122dde5bdc012c20595a94c14"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:96d9ea57292f636ec851a9bb961a5cc0f9976900e16e5d5647f19aa36ba6366b"}, - {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, - {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, - {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, - {file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"}, - {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d363666acc21d2c204dd8705c0e0457d7b2ee7a76cb16ffc099d6799744ac99"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:334ef6ed8337bd0b58bb0ae4f7f2dcc84c9f116e474bb4ec250a8bb9bd797a66"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", 
hash = "sha256:6672fdde0fd1a60b44fb1751a7779c6db487e42b0cc65e7caa6aa686874e79fb"}, - {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, - {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, - {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e52a712c38e5fb4fd68e00dc3caf00b60cb65634d50e32281a9d6431b33b4af1"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5539f6da3418c3dc002739cb2bb8d169056aa66e0c83f6bacae0cd3ac26b423"}, - {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:343675e0da2f3c69d3fb1e894ba0a1acf58f481f3b9372ce1eb465ef93cf6fed"}, - {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:abe1ef3d780de56defd0c77c5ba95e152f4e4c4e12d7e11dd8447d338b85a625"}, - {file = "greenlet-3.0.0-cp37-cp37m-win32.whl", hash = "sha256:e693e759e172fa1c2c90d35dea4acbdd1d609b6936115d3739148d5e4cd11947"}, - {file = "greenlet-3.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bdd696947cd695924aecb3870660b7545a19851f93b9d327ef8236bfc49be705"}, - {file = "greenlet-3.0.0-cp37-universal2-macosx_11_0_x86_64.whl", hash 
= "sha256:cc3e2679ea13b4de79bdc44b25a0c4fcd5e94e21b8f290791744ac42d34a0353"}, - {file = "greenlet-3.0.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:63acdc34c9cde42a6534518e32ce55c30f932b473c62c235a466469a710bfbf9"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a1a6244ff96343e9994e37e5b4839f09a0207d35ef6134dce5c20d260d0302c"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b822fab253ac0f330ee807e7485769e3ac85d5eef827ca224feaaefa462dc0d0"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8060b32d8586e912a7b7dac2d15b28dbbd63a174ab32f5bc6d107a1c4143f40b"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:621fcb346141ae08cb95424ebfc5b014361621b8132c48e538e34c3c93ac7365"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb36985f606a7c49916eff74ab99399cdfd09241c375d5a820bb855dfb4af9f"}, - {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10b5582744abd9858947d163843d323d0b67be9432db50f8bf83031032bc218d"}, - {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f351479a6914fd81a55c8e68963609f792d9b067fb8a60a042c585a621e0de4f"}, - {file = "greenlet-3.0.0-cp38-cp38-win32.whl", hash = "sha256:9de687479faec7db5b198cc365bc34addd256b0028956501f4d4d5e9ca2e240a"}, - {file = "greenlet-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:3fd2b18432e7298fcbec3d39e1a0aa91ae9ea1c93356ec089421fabc3651572b"}, - {file = "greenlet-3.0.0-cp38-universal2-macosx_11_0_x86_64.whl", hash = "sha256:3c0d36f5adc6e6100aedbc976d7428a9f7194ea79911aa4bf471f44ee13a9464"}, - {file = "greenlet-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4cd83fb8d8e17633ad534d9ac93719ef8937568d730ef07ac3a98cb520fd93e4"}, - {file = 
"greenlet-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5b2d4cdaf1c71057ff823a19d850ed5c6c2d3686cb71f73ae4d6382aaa7a06"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e7dcdfad252f2ca83c685b0fa9fba00e4d8f243b73839229d56ee3d9d219314"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94e4e924d09b5a3e37b853fe5924a95eac058cb6f6fb437ebb588b7eda79870"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6fb737e46b8bd63156b8f59ba6cdef46fe2b7db0c5804388a2d0519b8ddb99"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d55db1db455c59b46f794346efce896e754b8942817f46a1bada2d29446e305a"}, - {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56867a3b3cf26dc8a0beecdb4459c59f4c47cdd5424618c08515f682e1d46692"}, - {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a812224a5fb17a538207e8cf8e86f517df2080c8ee0f8c1ed2bdaccd18f38f4"}, - {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, - {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, - {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, - {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, + {file = "greenlet-3.3.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9bc885b89709d901859cf95179ec9f6bb67a3d2bb1f0e88456461bd4b7f8fd0d"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b568183cf65b94919be4438dc28416b234b678c608cafac8874dfeeb2a9bbe13"}, + {file = 
"greenlet-3.3.2-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:527fec58dc9f90efd594b9b700662ed3fb2493c2122067ac9c740d98080a620e"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:508c7f01f1791fbc8e011bd508f6794cb95397fdb198a46cb6635eb5b78d85a7"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ad0c8917dd42a819fe77e6bdfcb84e3379c0de956469301d9fd36427a1ca501f"}, + {file = "greenlet-3.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:97245cc10e5515dbc8c3104b2928f7f02b6813002770cfaffaf9a6e0fc2b94ef"}, + {file = "greenlet-3.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8c1fdd7d1b309ff0da81d60a9688a8bd044ac4e18b250320a96fc68d31c209ca"}, + {file = "greenlet-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:5d0e35379f93a6d0222de929a25ab47b5eb35b5ef4721c2b9cbcc4036129ff1f"}, + {file = "greenlet-3.3.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:c56692189a7d1c7606cb794be0a8381470d95c57ce5be03fb3d0ef57c7853b86"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ebd458fa8285960f382841da585e02201b53a5ec2bac6b156fc623b5ce4499f"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a443358b33c4ec7b05b79a7c8b466f5d275025e750298be7340f8fc63dff2a55"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4375a58e49522698d3e70cc0b801c19433021b5c37686f7ce9c65b0d5c8677d2"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e2cd90d413acbf5e77ae41e5d3c9b3ac1d011a756d7284d7f3f2b806bbd6358"}, + {file = "greenlet-3.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:442b6057453c8cb29b4fb36a2ac689382fc71112273726e2423f7f17dc73bf99"}, + {file = "greenlet-3.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:45abe8eb6339518180d5a7fa47fa01945414d7cca5ecb745346fc6a87d2750be"}, + {file = "greenlet-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e692b2dae4cc7077cbb11b47d258533b48c8fde69a33d0d8a82e2fe8d8531d5"}, + {file = "greenlet-3.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:02b0a8682aecd4d3c6c18edf52bc8e51eacdd75c8eac52a790a210b06aa295fd"}, + {file = "greenlet-3.3.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:ac8d61d4343b799d1e526db579833d72f23759c71e07181c2d2944e429eb09cd"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ceec72030dae6ac0c8ed7591b96b70410a8be370b6a477b1dbc072856ad02bd"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2a5be83a45ce6188c045bcc44b0ee037d6a518978de9a5d97438548b953a1ac"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ae9e21c84035c490506c17002f5c8ab25f980205c3e61ddb3a2a2a2e6c411fcb"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43e99d1749147ac21dde49b99c9abffcbc1e2d55c67501465ef0930d6e78e070"}, + {file = "greenlet-3.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c956a19350e2c37f2c48b336a3afb4bff120b36076d9d7fb68cb44e05d95b79"}, + {file = "greenlet-3.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6c6f8ba97d17a1e7d664151284cb3315fc5f8353e75221ed4324f84eb162b395"}, + {file = "greenlet-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:34308836d8370bddadb41f5a7ce96879b72e2fdfb4e87729330c6ab52376409f"}, + {file = "greenlet-3.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:d3a62fa76a32b462a97198e4c9e99afb9ab375115e74e9a83ce180e7a496f643"}, + {file = "greenlet-3.3.2-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:aa6ac98bdfd716a749b84d4034486863fd81c3abde9aa3cf8eff9127981a4ae4"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", 
hash = "sha256:ab0c7e7901a00bc0a7284907273dc165b32e0d109a6713babd04471327ff7986"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d248d8c23c67d2291ffd47af766e2a3aa9fa1c6703155c099feb11f526c63a92"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ccd21bb86944ca9be6d967cf7691e658e43417782bce90b5d2faeda0ff78a7dd"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b6997d360a4e6a4e936c0f9625b1c20416b8a0ea18a8e19cabbefc712e7397ab"}, + {file = "greenlet-3.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:64970c33a50551c7c50491671265d8954046cb6e8e2999aacdd60e439b70418a"}, + {file = "greenlet-3.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1a9172f5bf6bd88e6ba5a84e0a68afeac9dc7b6b412b245dd64f52d83c81e55b"}, + {file = "greenlet-3.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:a7945dd0eab63ded0a48e4dcade82939783c172290a7903ebde9e184333ca124"}, + {file = "greenlet-3.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:394ead29063ee3515b4e775216cb756b2e3b4a7e55ae8fd884f17fa579e6b327"}, + {file = "greenlet-3.3.2-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:8d1658d7291f9859beed69a776c10822a0a799bc4bfe1bd4272bb60e62507dab"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:18cb1b7337bca281915b3c5d5ae19f4e76d35e1df80f4ad3c1a7be91fadf1082"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c2e47408e8ce1c6f1ceea0dffcdf6ebb85cc09e55c7af407c99f1112016e45e9"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e3cb43ce200f59483eb82949bf1835a99cf43d7571e900d7c8d5c62cdf25d2f9"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:63d10328839d1973e5ba35e98cccbca71b232b14051fd957b6f8b6e8e80d0506"}, + {file = "greenlet-3.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8e4ab3cfb02993c8cc248ea73d7dae6cec0253e9afa311c9b37e603ca9fad2ce"}, + {file = "greenlet-3.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:94ad81f0fd3c0c0681a018a976e5c2bd2ca2d9d94895f23e7bb1af4e8af4e2d5"}, + {file = "greenlet-3.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:8c4dd0f3997cf2512f7601563cc90dfb8957c0cff1e3a1b23991d4ea1776c492"}, + {file = "greenlet-3.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:cd6f9e2bbd46321ba3bbb4c8a15794d32960e3b0ae2cc4d49a1a53d314805d71"}, + {file = "greenlet-3.3.2-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:e26e72bec7ab387ac80caa7496e0f908ff954f31065b0ffc1f8ecb1338b11b54"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b466dff7a4ffda6ca975979bab80bdadde979e29fc947ac3be4451428d8b0e4"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b8bddc5b73c9720bea487b3bffdb1840fe4e3656fba3bd40aa1489e9f37877ff"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:59b3e2c40f6706b05a9cd299c836c6aa2378cabe25d021acd80f13abf81181cf"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b26b0f4428b871a751968285a1ac9648944cea09807177ac639b030bddebcea4"}, + {file = "greenlet-3.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1fb39a11ee2e4d94be9a76671482be9398560955c9e568550de0224e41104727"}, + {file = "greenlet-3.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:20154044d9085151bc309e7689d6f7ba10027f8f5a8c0676ad398b951913d89e"}, + {file = "greenlet-3.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c04c5e06ec3e022cbfe2cd4a846e1d4e50087444f875ff6d2c2ad8445495cf1a"}, + {file = "greenlet-3.3.2.tar.gz", hash = 
"sha256:2eaf067fc6d886931c7962e8c6bede15d2f01965560f3359b27c80bde2d151f2"}, ] [package.extras] -docs = ["Sphinx"] -test = ["objgraph", "psutil"] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] [[package]] name = "idna" -version = "3.4" +version = "3.11" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.3.0" @@ -458,14 +491,14 @@ files = [ [[package]] name = "jmespath" -version = "1.0.1" +version = "1.1.0" description = "JSON Matching Expressions" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, - {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, + {file = "jmespath-1.1.0-py3-none-any.whl", hash = "sha256:a5663118de4908c91729bea0acadca56526eb2698e83de10cd116ae0f4e97c64"}, + {file = "jmespath-1.1.0.tar.gz", hash = "sha256:472c87d80f36026ae83c6ddd0f1d05d4e510134ed462851fd5f754c8c3cbb88d"}, ] [[package]] @@ -589,14 +622,14 @@ files = [ [[package]] name = "packaging" -version = "23.2" +version = "26.0" description = "Core utilities for Python packages" optional = false 
-python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"}, + {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, ] [[package]] @@ -694,31 +727,31 @@ files = [ [[package]] name = "pycparser" -version = "2.21" +version = "3.0" description = "C parser in Python" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.10" groups = ["main"] markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, + {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"}, + {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, ] [[package]] name = "pygments" -version = "2.16.1" +version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, - {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, ] [package.extras] -plugins = ["importlib-metadata ; python_version < \"3.8\""] +windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymysql" @@ -760,14 +793,14 @@ dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["dev"] files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -813,32 +846,32 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<8)"] [[package]] name = "s3transfer" -version = "0.7.0" +version = "0.16.0" description = "An Amazon S3 Transfer Manager" optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "s3transfer-0.7.0-py3-none-any.whl", hash = 
"sha256:10d6923c6359175f264811ef4bf6161a3156ce8e350e705396a7557d6293c33a"}, - {file = "s3transfer-0.7.0.tar.gz", hash = "sha256:fd3889a66f5fe17299fe75b82eae6cf722554edca744ca5d5fe308b104883d2e"}, + {file = "s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe"}, + {file = "s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920"}, ] [package.dependencies] -botocore = ">=1.12.36,<2.0a.0" +botocore = ">=1.37.4,<2.0a.0" [package.extras] -crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] +crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] [[package]] name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["dev"] files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] @@ -858,14 +891,14 @@ slack_sdk = ">=3.38.0,<4" [[package]] name = "slack-sdk" -version = "3.40.1" +version = "3.41.0" description = "The Slack API Platform SDK for Python" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "slack_sdk-3.40.1-py2.py3-none-any.whl", hash = "sha256:cd8902252979aa248092b0d77f3a9ea3cc605bc5d53663ad728e892e26e14a65"}, - {file = "slack_sdk-3.40.1.tar.gz", hash = "sha256:a215333bc251bc90abf5f5110899497bf61a3b5184b6d9ee35d73ebf09ec3fd0"}, + {file = "slack_sdk-3.41.0-py2.py3-none-any.whl", hash = 
"sha256:bb18dcdfff1413ec448e759cf807ec3324090993d8ab9111c74081623b692a89"}, + {file = "slack_sdk-3.41.0.tar.gz", hash = "sha256:eb61eb12a65bebeca9cb5d36b3f799e836ed2be21b456d15df2627cfe34076ca"}, ] [package.extras] @@ -987,20 +1020,21 @@ files = [ [[package]] name = "urllib3" -version = "1.26.17" +version = "2.6.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, - {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, ] [package.extras] -brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [metadata] lock-version 
= "2.1" diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index 2d1d7a6..95d10f9 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,19 +1,19 @@ alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" -certifi==2023.7.22 ; python_version >= "3.12" and python_version < "4.0" +certifi==2026.2.25 ; python_version >= "3.12" and python_version < "4.0" cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" -charset-normalizer==3.3.0 ; python_version >= "3.12" and python_version < "4.0" +charset-normalizer==3.4.6 ; python_version >= "3.12" and python_version < "4.0" cryptography==46.0.5 ; python_version >= "3.12" and python_version < "4.0" -greenlet==3.0.0 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") -idna==3.4 ; python_version >= "3.12" and python_version < "4.0" +greenlet==3.3.2 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") +idna==3.11 ; python_version >= "3.12" and python_version < "4.0" mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0" markupsafe==3.0.3 ; python_version >= "3.12" and python_version < "4.0" psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0" -pycparser==2.21 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" +pycparser==3.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "PyPy" 
pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0" python-dotenv==1.2.2 ; python_version >= "3.12" and python_version < "4.0" requests==2.33.0 ; python_version >= "3.12" and python_version < "4.0" slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0" -slack-sdk==3.40.1 ; python_version >= "3.12" and python_version < "4.0" +slack-sdk==3.41.0 ; python_version >= "3.12" and python_version < "4.0" sqlalchemy==2.0.48 ; python_version >= "3.12" and python_version < "4.0" typing-extensions==4.15.0 ; python_version >= "3.12" and python_version < "4.0" -urllib3==1.26.17 ; python_version >= "3.12" and python_version < "4.0" +urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0" From 033239cc5ea2b5004c482fe649f74bceb6a456bb Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 17:51:27 -0500 Subject: [PATCH 31/45] Prepping for initial public release. --- .env.example | 2 +- .github/ISSUE_TEMPLATE/bug_report.md | 25 ++ .github/ISSUE_TEMPLATE/feature_request.md | 17 + .github/pull_request_template.md | 13 + .github/workflows/ci.yml | 28 +- .pre-commit-config.yaml | 9 + CHANGELOG.md | 25 ++ CONTRIBUTING.md | 19 + README.md | 27 +- deploy.sh | 14 + docs/DEPLOYMENT.md | 6 +- docs/IMPROVEMENTS.md | 509 ---------------------- docs/INFRA_CONTRACT.md | 2 +- pyproject.toml | 6 +- syncbot/app.py | 6 + syncbot/db/__init__.py | 6 +- syncbot/slack/orm.py | 9 +- 17 files changed, 190 insertions(+), 533 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/pull_request_template.md create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md delete mode 100644 docs/IMPROVEMENTS.md diff --git a/.env.example b/.env.example index 629a19f..d82597f 100644 --- a/.env.example +++ b/.env.example @@ -8,7 +8,7 @@ # For native Python development, source it: source .env or export $(cat .env | xargs) # 
----------------------------------------------------------------------------- -# Database (mysql, postgresql, or sqlite) — pre-release: fresh installs only +# Database (mysql, postgresql, or sqlite) # ----------------------------------------------------------------------------- # Option A — MySQL (default): legacy vars or DATABASE_URL DATABASE_BACKEND=mysql diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..431ca64 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,25 @@ +--- +name: Bug report +about: Report something that is not working as expected +labels: bug +--- + +## What happened + + + +## Steps to reproduce + +1. +2. +3. + +## Expected behavior + + + +## Environment + +- Cloud / deploy: +- Database: +- Browser (if UI-related): diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..07e6dc0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest an idea or improvement +labels: enhancement +--- + +## Problem or use case + + + +## Proposed solution + + + +## Alternatives (optional) + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..994ec77 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,13 @@ +## Summary + + + +## How to test + + + +## Checklist + +- [ ] CI passes (requirements sync, SAM lint, tests) +- [ ] Docs updated if behavior or deploy steps changed +- [ ] No new cloud-provider-specific code under `syncbot/` (keep infra in `infra/` and workflows) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6e5b739..bdab18e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,7 +4,7 @@ name: CI on: pull_request: push: - branches: [main, master, test, prod] + branches: [main, test, prod] concurrency: group: 
ci-${{ github.workflow }}-${{ github.ref }} @@ -13,8 +13,13 @@ concurrency: jobs: requirements-sync: runs-on: ubuntu-latest + permissions: + contents: write steps: - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref || github.ref_name }} + fetch-depth: 0 - uses: actions/setup-python@v5 with: python-version: "3.12" @@ -23,13 +28,24 @@ jobs: python -m pip install --upgrade pip pip install poetry poetry self add poetry-plugin-export - - name: Check requirements.txt is in sync with poetry.lock + - name: Sync requirements.txt with poetry.lock + env: + PR_HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }} run: | - poetry export -f requirements.txt --without-hashes -o /tmp/expected.txt - diff -u syncbot/requirements.txt /tmp/expected.txt || { - echo "::error::syncbot/requirements.txt is out of sync with poetry.lock. Run: poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt (Poetry 2.x: poetry self add poetry-plugin-export first)" + poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt + if git diff --quiet syncbot/requirements.txt; then + echo "requirements.txt is already in sync." + elif [[ -n "${PR_HEAD_REPO}" && "${PR_HEAD_REPO}" != "${GITHUB_REPOSITORY}" ]]; then + echo "::error::syncbot/requirements.txt is out of sync with poetry.lock. From the repo root run: poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt" exit 1 - } + else + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add syncbot/requirements.txt + git commit -m "chore: sync requirements.txt with poetry.lock" + git push + echo "::notice::requirements.txt was out of sync and has been auto-fixed." 
+ fi sam-lint: runs-on: ubuntu-latest diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83dfa57..3bf51ed 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,3 +23,12 @@ repos: - id: ruff args: [--fix, --exit-non-zero-on-fix] - id: ruff-format + + - repo: local + hooks: + - id: sync-requirements + name: Sync requirements.txt with poetry.lock + entry: bash -c 'poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt && git add syncbot/requirements.txt' + language: system + files: ^poetry\.lock$ + pass_filenames: false diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..a2f4453 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog + +All notable changes to this project are documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.0.0] - 2026-03-25 + +### Added + +- Multi-workspace message sync: messages, threads, edits, deletes, reactions, images, videos, and GIFs +- Cross-workspace @mention resolution (email, name, and manual matching) +- Workspace Groups with invite codes (many-to-many collaboration; direct and group-wide sync modes) +- Pause, resume, and stop per-channel sync controls +- App Home tab for configuration (no slash commands) +- Cross-instance federation (optional, HMAC-authenticated) +- Backup/restore and workspace data migration +- Bot token encryption at rest (Fernet) +- AWS deployment (SAM/CloudFormation) with optional CI/CD via GitHub Actions +- GCP deployment (Terraform/Cloud Run) with interactive deploy script; GitHub Actions workflow for GCP is not yet fully wired +- Dev Container and Docker Compose for local development +- Structured JSON logging with correlation IDs and CloudWatch alarms (AWS) +- PostgreSQL, MySQL, and SQLite database backends +- Alembic-managed schema migrations applied at startup 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..04846d1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing + +Thanks for helping improve SyncBot. + +## Workflow + +1. **Fork** the repository and create a branch from **`main`**. +2. Open a **pull request** targeting **`main`** on the upstream repo (or the repo you were asked to contribute to). +3. Keep application code **provider-neutral**: put cloud-specific logic only under `infra/<provider>/` and in `deploy-<provider>.yml` workflows. See [docs/INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md) (Fork Compatibility Policy). + +## Before you submit + +- Run **`pre-commit run --all-files`** (install with `pip install pre-commit && pre-commit install` if needed). +- Ensure **CI passes**: requirements export check, SAM template lint, and tests (see [.github/workflows/ci.yml](.github/workflows/ci.yml)). + +## Questions + +Use [GitHub Issues](https://github.com/F3Nation-Community/syncbot/issues) for bugs and feature ideas, or check [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) for deploy-related questions. diff --git a/README.md b/README.md index d4e443d..97f1afb 100644 --- a/README.md +++ b/README.md @@ -7,9 +7,27 @@ SyncBot is a Slack app for replicating messages and replies across workspaces on --- +## Branching (upstream vs downstream) + +This repo is the **canonical** project. **Forks** (downstream installations) should use Git like this: + +| Branch | Role | |--------|------| | **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | | **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [DEPLOYMENT.md](docs/DEPLOYMENT.md)).
| +Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. + +--- + ## Deploy (AWS or GCP) -From the **repository root**, use the infra-agnostic launcher: +You can deploy in two ways: + +1. **Download or clone and run the deploy script** — No GitHub Actions required. From the **repository root**, run `./deploy.sh` (or `.\deploy.ps1` on Windows). The script walks you through provider choice, cloud auth, and optional GitHub variable setup. +2. **Fork the repo and use CI/CD** — Configure repository variables and secrets (see [DEPLOYMENT.md](docs/DEPLOYMENT.md)), then push to **`test`** or **`prod`** on your fork to trigger automated deploys. + +From the **repository root**, the infra-agnostic launcher is: | OS | Command | |----|---------| @@ -18,6 +36,10 @@ From the **repository root**, use the infra-agnostic launcher: The launcher lists providers under `infra/<provider>/scripts/deploy.sh` (e.g. **aws**, **gcp**), prompts for a choice, and runs that script. Shortcuts: `./deploy.sh aws`, `./deploy.sh gcp`, `./deploy.sh 1`. On **Windows**, `deploy.ps1` checks for **Git Bash** or **WSL** bash, then runs the same `deploy.sh` paths (provider prerequisites are enforced inside those bash scripts). +If **Poetry** is on your `PATH`, the root launcher first runs `poetry update` and regenerates `syncbot/requirements.txt` from `poetry.lock` so deploys match the pinned Python deps (Poetry 2.x: install the export plugin once with `poetry self add poetry-plugin-export`). If Poetry is missing, the launcher skips this step and continues. + +**GCP CI:** Interactive deploy via `./deploy.sh` → **gcp** is supported. The **GitHub Actions** workflow for GCP (`.github/workflows/deploy-gcp.yml`) is a stub until Workload Identity Federation and image build/push steps are wired — use the guided script for GCP until then.
+ ### What to install first | Tool | Why | @@ -95,7 +117,8 @@ poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt | [ARCHITECTURE.md](docs/ARCHITECTURE.md) | Sync flow, AWS reference architecture | | [BACKUP_AND_MIGRATION.md](docs/BACKUP_AND_MIGRATION.md) | Backup/restore and federation migration | | [API_REFERENCE.md](docs/API_REFERENCE.md) | HTTP routes and Slack events | -| [IMPROVEMENTS.md](docs/IMPROVEMENTS.md) | Changelog / planned work | +| [CHANGELOG.md](CHANGELOG.md) | Release history | +| [CONTRIBUTING.md](CONTRIBUTING.md) | How to contribute | ### Project layout diff --git a/deploy.sh b/deploy.sh index a652aff..cc0e106 100755 --- a/deploy.sh +++ b/deploy.sh @@ -665,6 +665,20 @@ main() { exit 1 fi + echo "=== Sync Python Dependencies ===" + if command -v poetry &>/dev/null; then + poetry update --quiet + if poetry self show plugins 2>/dev/null | grep -q poetry-plugin-export; then + poetry export -f requirements.txt --without-hashes -o "$REPO_ROOT/syncbot/requirements.txt" + echo "syncbot/requirements.txt updated from poetry.lock." + else + echo "Warning: poetry-plugin-export not installed. Run: poetry self add poetry-plugin-export" >&2 + echo "Skipping requirements.txt sync." >&2 + fi + else + echo "Warning: poetry not found. Skipping dependency sync." >&2 + fi + echo "=== Run Provider Script ===" echo "Running: $script_path" bash "$script_path" diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 01e67a0..225b13c 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -68,6 +68,8 @@ See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and ou ## Fork-First model (recommended for forks) +**Branch roles** (see also the root [README](../README.md) **Branching** section): use **`main`** to track upstream and merge contributions; on your fork, use **`test`** and **`prod`** for automated deploys (CI runs on push to those branches). + 1. 
Keep `syncbot/` provider-neutral; use only env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md). 2. Put provider code in `infra/<provider>/` and `.github/workflows/deploy-<provider>.yml`. 3. Prefer the AWS layout as reference; treat other providers as swappable scaffolds. @@ -88,7 +90,7 @@ See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and ou ## Database backends -The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. **Pre-release:** DB flow targets **fresh installs**; schema is applied at startup via Alembic. +The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. Schema changes are applied at startup via Alembic (`alembic upgrade head`). - **AWS:** Choose engine in the deploy script or pass `DatabaseEngine=mysql` / `postgresql` to `sam deploy`. - **Contract:** [INFRA_CONTRACT.md](INFRA_CONTRACT.md) — `DATABASE_BACKEND`, `DATABASE_URL` or host/user/password/schema. @@ -268,7 +270,7 @@ See also [Sharing infrastructure across apps](#sharing-infrastructure-across-app ## Database schema (Alembic) -Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upgrade head`** (pre-release: fresh installs). +Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upgrade head`**. --- diff --git a/docs/IMPROVEMENTS.md b/docs/IMPROVEMENTS.md deleted file mode 100644 index c894242..0000000 --- a/docs/IMPROVEMENTS.md +++ /dev/null @@ -1,509 +0,0 @@ -# SyncBot Improvements Summary - -This document outlines the improvements made to the SyncBot application and additional recommendations for future enhancements. - -> Historical changelog note: this file tracks work over time and may reference superseded implementation details. -> For current deployment/runtime requirements, use `docs/INFRA_CONTRACT.md` and `docs/DEPLOYMENT.md` as the source of truth. - -## ✅ Completed Improvements - -### 1.
Database Management Fixes -- **Added `@staticmethod` decorators** to all `DbManager` methods for proper static method usage -- **Fixed session management** - All database methods now properly close sessions in finally blocks -- **Improved error handling** in database operations - -### 2. Code Quality Improvements -- **Removed duplicate constant definitions** in `constants.py` where env-var names were defined twice -- **Fixed type hints**: - - `get_request_type()` now correctly returns `tuple[str, str]` instead of `tuple[str]` - - `apply_mentioned_users()` now correctly returns `str` instead of `List[Dict]` - -### 3. Error Handling Enhancements -- **Replaced bare `except Exception:` clauses** with proper error logging: - - `handle_remove_sync()` now logs warnings when failing to leave channels - - `handle_join_sync_submission()` now logs errors with context - - Added null check for `sync_channel_record` before use -- **Improved exception handling** in `announcements.py`: - - Replaced print statements with proper logging - - Better handling of rate limiting errors - - More descriptive error messages - -### 4. Logging Improvements -- **Replaced all `print()` statements** with proper logging: - - `orm.py`: Added logger and replaced print statements with `logger.error()` and `logger.debug()` - - `announcements.py`: Replaced print statements with appropriate log levels (info, warning, error) - - `handlers.py`: Removed debug print statement -- **Added logging module** where needed - -### 5. 
Database Connection Pooling -- **Replaced `pool.NullPool` with `pool.QueuePool`** (`pool_size=3`, `max_overflow=2`, `pool_recycle=3600`) for connection reuse across warm Lambda invocations -- **Added `pool_pre_ping=True`** to detect and replace stale connections transparently -- **Added `_with_retry` decorator** on all `DbManager` methods to automatically retry on transient `OperationalError` (up to 2 retries with engine disposal between attempts) -- **Simplified `close_session()`** to return connections to the pool instead of disposing the entire engine - -### 6. Rate Limiting Handling -- **Created `slack_retry` decorator** with exponential backoff for all Slack API calls: - - Honors `Retry-After` headers on HTTP 429 responses - - Retries on transient 5xx server errors - - Configurable max retries (default 3) with exponential backoff (capped at 30s) -- **Refactored `parse_mentioned_users()`** to use individual `users.info()` calls instead of the heavy `users.list()` endpoint that is easily rate-limited -- **Refactored `apply_mentioned_users()`** to use `users.lookupByEmail()` for individual lookups instead of `users.list()` -- **Added user profile caching** (`_get_user_profile()`) with a 5-minute TTL to avoid redundant API calls for the same user -- **Applied `@slack_retry`** to `post_message()`, `delete_message()`, `_users_info()`, and `_lookup_user_by_email()` - -### 7. Error Recovery -- **Added error isolation in sync loops** - a failure syncing to one channel no longer prevents syncing to the remaining channels: - - `_handle_new_post()`: Individual channel failures are caught and logged; remaining channels continue - - `_handle_thread_reply()`: Same per-channel error isolation - - `_handle_message_edit()`: Same per-channel error isolation - - `_handle_message_delete()`: Same per-channel error isolation -- **Guard against empty post lists** - `DbManager.create_records()` is only called when there are records to persist - -### 8. 
Type Safety -- **Added `EventContext` TypedDict** for the parsed message event context, replacing untyped `dict` -- **Updated all sub-handler signatures** (`_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete`) to use `EventContext` -- **Added comprehensive type hints** across the codebase: - - `helpers.py`: `safe_get()`, `get_user_info()`, `post_message()`, `delete_message()`, `update_modal()`, `parse_mentioned_users()`, `apply_mentioned_users()` and all new functions - - `handlers.py`: `_build_photo_context()`, `_get_team_name()` return types - - `schemas.py`: `GetDBClass` mixin methods (`get_id`, `get`, `to_json`, `__repr__`) -- **Improved exception handling in `safe_get()`** to also catch `AttributeError` and `IndexError` - -### 9. Testing -- **Created unit test suite** with 40 tests across 3 modules: - - `tests/test_helpers.py`: `safe_get()` (9 tests), encryption roundtrip/failure/wrong-key (5 tests), TTL cache (4 tests), `get_request_type()` (4 tests), `slack_retry` decorator (3 tests) - - `tests/test_db.py`: `_with_retry` decorator (4 tests), engine QueuePool verification (1 test) - - `tests/test_handlers.py`: `_parse_event_fields()` (4 tests), `EventContext` TypedDict (1 test), `_sanitize_text()` (5 tests) -- **Added pytest configuration** in `pyproject.toml` with `testpaths` and `pythonpath` - -### 10. Code Organization (Medium Priority - Previously Completed) -- **Refactored `respond_to_message_event()`** (170+ lines) into a thin dispatcher and focused sub-handlers: `_parse_event_fields`, `_build_photo_context`, `_get_team__name`, `_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete` - -### 11. 
Configuration Management (Medium Priority - Previously Completed) -- **Added `validate_config()`** startup validation for required environment variables -- **Fails fast in production** (raises `EnvironmentError`); warns in local development -- **Separate required lists** for always-required and production-only variables - -### 12. Database Schema (Medium Priority - Previously Completed) -- **Added soft deletes** for `sync_channels` via `deleted_at` column with index -- **Created SQL migration scripts**: `migrate_001_security.sql`, `migrate_003_soft_deletes.sql` -- **Created Python migration script**: `migrate_002_encrypt_tokens.py` for encrypting existing tokens -- **Updated all queries** to filter out soft-deleted records - -### 13. Security (Medium Priority - Previously Completed) -- **Bot token encryption** at rest using Fernet (AES-128-CBC + HMAC-SHA256) -- **Fail-closed decryption** - refuses to use tokens that fail decryption -- **Input sanitization** via `_sanitize_text()` on user-submitted form data -- **RDS SSL/TLS enforcement** (server-side parameter group + client-side connect_args) -- **API Gateway throttling** (20 burst / 10 sustained requests per second) - -### 14. Performance (Medium Priority - Previously Completed) -- **In-process TTL cache** for `get_sync_list()` (60s TTL) and user info lookups (300s TTL) -- **Hoisted `get_user_info()` calls** outside loops where possible -- **Connection pooling** reuses DB connections across invocations in warm Lambda containers - -### 15. Infrastructure as Code -- **AWS SAM template** (`infra/aws/template.yaml`) defining VPC, RDS, Lambda, API Gateway (SAM artifact S3 used for deploy packaging only) -- **Free-tier optimized** (128 MB Lambda, db.t4g.micro RDS, gp2 storage, no NAT Gateway) -- **CI/CD pipeline** (`.github/workflows/sam-pipeline.yml`) for automated build/deploy -- **SAM config** (`samconfig.toml`) for staging and production environments - -### 16. 
Documentation (Low Priority - Completed) -- **Added module-level docstrings** to all Python modules across all packages -- **Added function-level docstrings** to all public functions across the codebase (encryption helpers, cache functions, Slack API wrappers, DB helpers, OAuth flow, photo upload, mention parsing, modal updates, request dispatch) -- **Added inline docstrings** to routing table dicts and action ID constants -- **Documented API endpoints** in the README (HTTP routes, subscribed events) -- **Documented deployment process** in the README (first-time deploy, subsequent deploys, CI/CD, migrations, shared infrastructure) - -### 17. Monitoring & Observability (Low Priority - Completed) -- **Added structured JSON logging** via `StructuredFormatter` — every log entry is a single JSON object with `timestamp`, `level`, `correlation_id`, `module`, `function`, `message`, and optional extra fields -- **Added correlation IDs** — a unique 12-character ID is assigned at the start of each incoming Slack request (`set_correlation_id()`) and automatically included in every log line during that request -- **Added metrics emission** via `emit_metric()` — structured log entries for key operational metrics: - - `request_handled` (with `duration_ms`, `request_type`, `request_id`) - - `request_error` (with `request_type`, `request_id`) - - `messages_synced` (with `sync_type`: `new_post`, `thread_reply`, `message_edit`, `message_delete`) - - `sync_failures` (with `sync_type`) -- **Added CloudWatch Alarms** in `infra/aws/template.yaml` (within free-tier's 10-alarm limit): - - `LambdaErrorAlarm` — fires on 3+ errors in 5 minutes - - `LambdaThrottleAlarm` — fires on any throttling - - `LambdaDurationAlarm` — fires when average duration exceeds 10 seconds - - `ApiGateway5xxAlarm` — fires on 5+ server errors in 5 minutes -- **X-Ray distributed tracing** was already enabled (`Tracing: Active` in SAM template) - -### 18. 
Code Style (Low Priority - Completed) -- **Configured `ruff`** as the project linter and formatter (added `[tool.ruff]` section to `pyproject.toml` with rules for pycodestyle, pyflakes, isort, pyupgrade, flake8-bugbear, flake8-simplify, flake8-logging) -- **Ran `ruff format`** across the entire codebase (all Python files in `syncbot/` and `tests/`) -- **Ran `ruff check --fix`** to auto-fix 123 issues (import sorting, deprecated typing imports, style modernization) -- **Manually fixed remaining issues**: mutable default argument (`orm.py`), `return` inside `finally` (`db/__init__.py`), `raise ... from None` for exception chaining (`helpers.py`), ternary ordering (`handlers.py`) -- **Created `.pre-commit-config.yaml`** with hooks for: - - `trailing-whitespace`, `end-of-file-fixer`, `check-yaml`, `check-added-large-files`, `check-merge-conflict`, `detect-private-key` - - `ruff` lint (with `--fix`) - - `ruff-format` - -### 19. Architecture Diagrams (Low Priority - Completed) -- **Added message sync flow sequence diagram** (Mermaid) to README showing the full request path from user message through API Gateway, Lambda, DB lookup, image upload, mention re-mapping, cross-workspace posting, and metric emission -- **Added AWS infrastructure diagram** (Mermaid) to ARCHITECTURE.md showing API Gateway, Lambda, RDS, EventBridge keep-warm, and CloudWatch monitoring - -### 20. Admin Authorization and Security Hardening (Completed) -- **Added admin/owner authorization** — only workspace admins and owners can run `/config-syncbot` and all related configuration actions (create sync, join sync, remove sync) - - `is_user_authorized(client, user_id)` checks `is_admin` / `is_owner` from the Slack `users.info` API, with caching - - `get_user_id_from_body(body)` extracts the user ID from any Slack request type (commands, actions, views) - - Unauthorized users receive an ephemeral message: ":lock: Only workspace admins and owners can configure SyncBot." 
-- **Defense-in-depth** — authorization checks are enforced at both the entry points (`build_config_form`, `build_join_sync_form`, `build_new_sync_form`) and the mutation handlers (`handle_remove_sync`, `handle_join_sync_submission`, `handle_new_sync_submission`) -- **Configurable via `REQUIRE_ADMIN` env var** (default `"true"`) — set to `"false"` to allow all users (for small teams) -- **Removed `/send-syncbot-announcement` command** — the broadcast command could be triggered by any admin in any connected workspace, affecting all workspaces; removed entirely as a security risk -- **Fixed input validation in `handle_remove_sync`** — `int()` conversion now wrapped in try/except to prevent crashes on malformed payloads -- **Fixed join-sync ordering in `handle_join_sync_submission`** — `conversations_join` now runs before `DbManager.create_record` so the DB record isn't created if the bot can't actually join the channel - -### 21. Cross-Workspace User Matching (Completed) -- **Persistent user matching pipeline** — @mentions in synced messages are resolved to the correct user in the target workspace using a multi-step algorithm: email lookup → name-based directory matching → bracketed fallback -- **New database tables**: - - `user_directory` — cached copy of each workspace's user profiles (slack_user_id, email, real_name, display_name, normalized_name), refreshed every 24h - - `user_mappings` — cross-workspace match results with TTL-based freshness (email: 30d, name: 14d, manual: never expires, none: 90d) -- **Name normalization** (`_normalize_name`) — trims trailing title/qualifier from display names (e.g., "Johnny B (Title)" → "Johnny B") while preserving original casing and spacing -- **Reactive matching via `team_join` event** — when a new user joins a connected workspace, their profile is added to the directory and all unmatched mappings targeting that workspace are re-checked automatically -- **Admin UI in `/config-syncbot`** — "User Matching" button opens a 
child modal showing: - - Stats: "X matched, Y unmatched" - - Refresh button to re-run auto-matching across all linked workspaces - - Unmatched users with native Slack user-picker dropdowns for manual matching (saved as `match_method='manual'`) - - Matched users with "Unlink" buttons to remove mappings -- **Fallback display** — unmatched mentions render as `[Display Name]` in square brackets instead of broken `@mentions` -- **Migration script** — `db/migrate_004_user_matching.sql` for existing deployments - -### 22. Bot Message Syncing (Completed) -- **Selective bot filtering** — only messages from SyncBot itself are ignored (to prevent infinite loops); messages from all other bots are synced normally -- **Bot identity detection** (`get_own_bot_id`) — resolves SyncBot's `bot_id` using `context` or `auth.test`, with caching -- **Bot attribution** (`get_bot_info_from_event`) — extracts `username` and `icons` from bot message events so synced bot messages preserve the original bot's name and avatar -- **Unit tests** for `_is_own_bot_message` (own bot, other bots, user messages, message_changed events, auth.test fallback) and `get_bot_info_from_event` - -### 23. Simplified Sync Creation (Completed) -- **One-step sync creation** — replaced the two-step flow (create sync title → join channel) with a single channel picker modal -- **`ConversationsSelectElement`** — new Block Kit element that shows both public and private channels (with `exclude_bot_users: true`) -- **Auto-naming** — the sync is named after the selected channel (resolved via `conversations.info`) -- **Combined operation** — on submit, the handler joins the channel, creates the `Sync` record, creates the `SyncChannel` link, and posts a welcome message in one step -- **Private channel support** — the "Join existing Sync" channel picker also upgraded to `ConversationsSelectElement` so private channels are now selectable - -### 24. 
Workspace Pairing with Directed Trust Codes (Completed) -- **Directed workspace pairing** — the Workspace Pairing screen lists every workspace that has SyncBot installed, with its pairing status (Paired, Pending, or Not paired) -- **Pairing flow**: Admin A sees Workspace B listed as "Not paired" → clicks "Generate Code" → a code locked to Workspace B is created → Admin A shares the code out-of-band → Admin B enters the code → pairing is activated bidirectionally -- **Locked codes** — pairing codes are generated for a specific target workspace; if a different workspace tries to redeem the code, it is rejected -- **New database table** — `workspace_pairings` with `initiator_workspace_id`, `partner_workspace_id`, `invite_code`, `status` (`pending`/`active`), `created_at`, `paired_at` -- **Code validation** — codes are 7-character alphanumeric with format `XXX-XXXX`; pending codes expire after 24 hours; self-pairing, wrong-workspace, and duplicate pairing are all rejected -- **Pairing UI in `/config-syncbot`** — "Workspace Pairing" button opens a modal showing: - - All installed workspaces with status: Paired (with Remove button), Pending (with code displayed and Cancel button), or Not paired (with Generate Code button) - - "Enter Pairing Code" button at the top for the receiving side -- **Cascading unpair** — removing a pairing soft-deletes all `SyncChannel` records shared between the two workspaces and has the bot leave those channels -- **Migration script** — `db/migrate_005_workspace_pairings.sql` for existing deployments - -### 25. 
Config Screen Redesign — Channel Sync & User Matching Overhaul (Completed) -- **Three-button config screen** — replaced the four-button layout (Join existing Sync, Create new Sync, User Matching, Workspace Pairing) with three focused buttons: **Workspace Pairing**, **User Matching**, **Channel Sync** -- **1-to-1 Channel Sync (publish/subscribe model)**: - - A workspace "publishes" one of its channels to a specific paired workspace, making it available for syncing - - The paired workspace "subscribes" by selecting one of their own channels to receive messages - - Each publish is scoped to exactly one pairing — publishing to workspace B and workspace C are separate operations - - Channel Sync modal shows: published channels (with Unpublish buttons), available channels from other group members (with Subscribe buttons), and a Publish Channel button - - Welcome messages are posted in both channels when a subscription is established - - Unpublishing cleans up both sides (soft-deletes SyncChannels, bot leaves channels) -- **Database changes** — added `pairing_id` column to `syncs` table (FK to `workspace_pairings`, `ON DELETE CASCADE`), removed UNIQUE constraint on `syncs.title` (same channel can be published to multiple pairings) -- **Workspace picker pattern** — both Channel Sync and User Matching now show a workspace picker modal when multiple pairings exist; auto-selects when only one pairing is active -- **User Matching improvements**: - - **Auto-sync on pairing activation** — when a pairing code is accepted, both workspaces' user directories are refreshed and auto-matching runs immediately in both directions - - **Scoped to pairing** — user matching is now filtered to the selected paired workspace instead of showing all linked workspaces at once - - **Filtered unmatchable users** — users with no possible candidate in the target workspace (by normalized display name or email) are hidden from the unmatched list - - **Override dropdowns for matched users** — matched 
users now show a `UsersSelectElement` pre-populated with the current match, allowing direct reassignment without unlinking first -- **New action constants** — ~12 new Block Kit action/callback IDs for channel sync flows, workspace pickers, publish/subscribe, and user matching workspace selection -- **New form templates** — `WORKSPACE_PICKER_FORM`, `PUBLISH_CHANNEL_FORM`, `SUBSCRIBE_CHANNEL_FORM` -- **Prefix-match routing** — added entries for `CONFIG_UNPUBLISH_CHANNEL` and `CONFIG_SUBSCRIBE_CHANNEL` (suffix contains sync/channel IDs) -- **ORM fix** — `update_modal` now supports `submit_button_text="None"` to render modals without a submit button (consistent with `post_modal`) - -### 26. Docker Local Development (Completed) -- **Dev Container support** — added `.devcontainer/devcontainer.json` and `.devcontainer/docker-compose.dev.yml` for full in-editor development inside a Docker container (Cursor / VS Code) - - Python, Pylance, and Ruff extensions pre-configured with format-on-save - - `PYTHONPATH` and database env vars set automatically - - Ports 3000 (app) and 3306 (MySQL) forwarded to host - - AWS CLI feature included for SAM operations - - `pytest` and `boto3` installed on container creation -- **Docker Compose** — added `Dockerfile` and `docker-compose.yml` for standalone container-based development without the Dev Container extension - - MySQL 8 with automatic schema initialization via `init.sql` mount - - App code mounted as a volume for live editing without rebuilds - - Named volume for database persistence across restarts -- **README updated** with three local development options: Dev Container (recommended), Docker Compose, and native Python - -### 27. 
App Home Tab Migration (Completed) -- **Replaced `/config-syncbot` slash command** with a persistent **App Home tab** — all configuration is now managed through the Home tab instead of slash commands and nested modals -- **Inline content** — workspace pairings and channel syncs are rendered directly on the Home tab instead of requiring modal navigation -- **Per-pairing sections** — each paired workspace shows its own section with a "Manage User Matching" button and channel sync controls (publish/unpublish/subscribe) -- **Simplified modal flow** — sub-screens (enter pairing code, publish channel, subscribe channel, user matching) now open as standalone modals (`views.open`) instead of stacked modals (`views.push`) -- **Auto-refresh** — all mutations (generate code, cancel, remove pairing, publish/unpublish/subscribe channel) automatically re-publish the Home tab -- **Manifest updated** — added `app_home_opened` to bot events, removed `slash_commands` section and `commands` OAuth scope -- **Non-admin users** see a locked message on the Home tab instead of an error - -### 28. 
Uninstall Soft-Delete & Reinstall Recovery (Completed) -- **Soft-delete on uninstall** — when a workspace uninstalls SyncBot, its record, pairings, and sync channels are soft-deleted (`deleted_at` timestamp) rather than hard-deleted -- **Automatic reinstall recovery** — if the workspace reinstalls within the retention period, all pairings and sync channels are automatically restored -- **Lifecycle notifications** — consistent notification model using channel messages and admin DMs: - - **Started** — new pairing activated: admin DMs in both workspaces - - **Paused** — workspace uninstalls: admin DMs + channel messages in member workspaces - - **Resumed** — workspace reinstalls: admin DMs + channel messages in member workspaces - - **Stopped** — manual removal: admin DMs + channel messages in member workspaces - - **Purged** — auto-cleanup after retention period: admin DMs to member workspaces -- **Paused indicator** — Home tab and pairing form show `:double_vertical_bar: Paused (uninstalled)` for soft-deleted member workspaces with no action buttons -- **Configurable retention** — `SOFT_DELETE_RETENTION_DAYS` env var (default 30 days) controls how long soft-deleted data is kept before permanent purge -- **Lazy daily purge** — stale soft-deleted workspaces are hard-deleted via `ON DELETE CASCADE` during the first `app_home_opened` event each day -- **Manifest updated** — added `tokens_revoked` to bot events, `im:write` to OAuth scopes -- **Migration** — `db/migrate_007_uninstall_soft_delete.sql` adds `deleted_at` to `workspaces` and `workspace_pairings` - -### 29. 
External Connections — Cross-Instance Federation (Completed) -- **Cross-instance sync** — independent SyncBot deployments (e.g., on separate AWS accounts, GCP, or Cloudflare) can now connect and sync messages, edits, deletes, reactions, and user matching across instances -- **Connection pairing flow** — admin generates a connection code on one instance, shares it out-of-band, and the other admin enters it to establish a secure connection - - Codes encode the instance's public URL and a unique instance ID in a base64 payload - - On acceptance, both sides exchange a shared secret and store a `federated_workspaces` record -- **HMAC-SHA256 request authentication** — all inter-instance webhook calls (except the initial pairing handshake and health checks) are signed using the shared secret, with replay protection via 5-minute timestamp validation -- **Federation API endpoints** — seven new HTTP endpoints for cross-instance communication: - - `POST /api/federation/pair` — accept an incoming connection request - - `POST /api/federation/message` — receive forwarded messages (new posts and thread replies) - - `POST /api/federation/message/edit` — receive message edits - - `POST /api/federation/message/delete` — receive message deletions - - `POST /api/federation/message/react` — receive reaction add/remove - - `POST /api/federation/users` — exchange user directory for mention matching - - `GET /api/federation/ping` — health check / connectivity test -- **Transparent message forwarding** — the core message handlers (`_handle_new_post`, `_handle_thread_reply`, `_handle_message_edit`, `_handle_message_delete`) detect whether a sync target is local or remote and dispatch accordingly — local channels are posted to directly, remote channels are forwarded via the federation webhook -- **User directory exchange** — when a connection is established, both instances exchange their user directories so @mention resolution works across instances -- **Image handling** — images are 
forwarded as file uploads or public URLs; the receiving instance uses them in Slack blocks -- **Retry with exponential backoff** — all outgoing federation HTTP calls retry up to 3 times with 1s/2s/4s backoff on transient failures (5xx, timeouts, connection errors) -- **Home tab UI** — "External Connections" section on the Home tab with "Generate Connection Code" and "Enter Connection Code" buttons, active connection display with status and remove button, and pending code display with cancel button -- **Connection label prompt** — generating a connection code prompts for a friendly name (e.g. "East Coast SyncBot") which is displayed on the Home tab and used as the remote workspace's display name -- **Code delivery via DM** — both internal pairing codes and external connection codes are sent as a DM to the admin for easy copy/paste (Slack Block Kit does not support clipboard buttons) -- **Opt-in feature flag** — external connections are disabled by default; set `SYNCBOT_FEDERATION_ENABLED=true` to enable. All UI, handlers, and API endpoints are gated behind this flag -- **New database table** — `federated_workspaces` (instance_id, webhook_url, public_key, status, name) -- **Schema change** — `federated_workspace_id` added to group members (NULL = local workspace, non-NULL = remote) -- **Environment variables** — `SYNCBOT_FEDERATION_ENABLED` (opt-in flag, default `false`), `SYNCBOT_INSTANCE_ID` (auto-generated UUID), `SYNCBOT_PUBLIC_URL` (required when enabled) -- **Federation package** — `syncbot/federation/core.py` (signing, HTTP client, payload builders), `syncbot/federation/api.py` (API endpoint handlers) -- **Migration** — `db/migrate_009_federated_workspaces.sql` - -### 30. 
Reaction Syncing (Completed) -- **Threaded reaction messages** — emoji reactions (`reaction_added` / `reaction_removed`) are synced to all linked channels as threaded replies on the corresponding message -- **Bidirectional** — reactions work in both directions across workspaces -- **User attribution** — reaction messages display the reacting user's display name and workspace -- **Permalink reference** — each reaction message includes a link to the original message -- **PostMeta lookup** — uses the existing `PostMeta` table to resolve source timestamps to target message timestamps for accurate threading -- **File message timestamp extraction** — `_extract_file_message_ts` uses a retry loop on `files.info` (up to 4 attempts) to reliably capture the message timestamp for files uploaded via `files_upload_v2`, ensuring reactions work on image and video messages - -### 31. GIF Syncing (Completed) -- **Slack GIF picker support** — GIFs sent via Slack's built-in `/giphy` picker or GIPHY integration are detected and synced -- **Nested block parsing** — `_build_file_context` extracts `image_url` from nested `image` blocks within `attachments`, which is how Slack structures GIF picker messages -- **Direct ImageBlock posting** — GIFs are always posted as `ImageBlock` elements via `chat.postMessage` using their public URLs, ensuring a proper message `ts` is captured for `PostMeta` (enabling reactions on GIFs) -- **GIF sync** — GIF URLs are publicly accessible and posted as image blocks; no file download needed - -### 32. 
Video & Image Direct Upload (Completed) -- **Direct upload only** — images and videos are synced via Slack's `files_upload_v2` (no S3); media is downloaded from the source and uploaded to each target channel -- **User attribution** — direct uploads include "Shared by User (Workspace)" in the `initial_comment` -- **Fallback text** — `post_message` supports a `fallback_text` argument for messages that contain only blocks (no text), satisfying Slack's accessibility requirements - -### 33. Pause/Resume/Stop Sync (Completed) -- **Sync lifecycle controls** — individual channel syncs can be paused, resumed, or stopped from the Home tab -- **`status` column** on `sync_channels` — supports `active` and `paused` states -- **Paused syncs** — messages, threads, edits, deletes, and reactions are not processed for paused channels; the handler checks `status` before dispatching -- **Stop with confirmation** — stopping a sync shows a confirmation modal before soft-deleting; the bot leaves the channel and notifies other member workspaces -- **Admin attribution** — pause/resume/stop actions are attributed to the admin who performed them in notification messages -- **Home tab indicators** — paused syncs show a `:double_vertical_bar: Paused` status on the Home tab with a Resume button - -### 34. User Profile Auto-Refresh (Completed) -- **`user_profile_changed` event** — subscribed in manifest and handled by `handle_user_profile_changed` -- **Directory update** — when a user changes their display name, real name, or email, the `user_directory` record is updated automatically -- **Mapping re-check** — after updating the directory, all user mappings involving the changed user are re-evaluated to detect new matches or update stale data - -### 35. 
Member Joined Channel Handler (Completed) -- **`member_joined_channel` event** — subscribed in manifest and handled by `handle_member_joined_channel` -- **Untracked channel detection** — when SyncBot is added to a channel that is not part of any active sync, it posts a friendly message and leaves automatically -- **Self-check** — the handler verifies the joined user is SyncBot itself (via `get_own_bot_user_id`) before acting - -### 36. Direct Pairing Requests (Completed) -- **Request-based pairing** — admins can send a direct pairing request to another workspace instead of manually sharing codes -- **DM notifications** — the target workspace's admins receive a DM with Accept/Decline buttons and context about the requesting workspace -- **Home tab notification** — pending inbound pairing requests are shown on the target workspace's Home tab with Accept/Decline buttons -- **Bidirectional activation** — accepting a request activates the pairing on both sides, refreshes user directories, runs auto-matching, and updates both Home tabs -- **DM cleanup** — pairing request DMs are replaced with updated status messages when accepted, declined, or cancelled - -### 37. 
Home Tab UI Enhancements (Completed) -- **Synced-since with year** — channel sync dates always display the full year (e.g., "February 18, 2026") using Python `datetime` formatting instead of Slack's `<!date>` token which omits the current year -- **Message count** — each sync displays the number of tracked messages from `PostMeta` (e.g., "Synced since: February 18, 2026 · 42 messages tracked") -- **Remote channel deep links** — target channel names in the Home tab and subscription modals are rendered as deep links using `slack://channel?team=T...&id=C...` URLs -- **Consolidated published channels** — all synced channels across pairings are shown in a single sorted list on the Home tab -- **Member Home tab refresh** — all mutations (publish, unpublish, subscribe, pause, resume, stop, pairing changes) automatically re-publish every affected group member's Home tab - -### 38. User Mapping Screen Redesign (Completed) -- **Dedicated Home tab screen** — user mapping is now a full-screen Home tab view instead of a nested modal, providing more space and a better experience -- **Remote user avatars** — each mapped/unmapped user row displays the remote workspace user's profile photo as a right-aligned `ImageAccessoryElement` -- **Section headers with icons** — `:warning: *Unmapped Users*`, `:pencil2: *Soft / Manual Matches*`, `:lock: *Email Matches*` with `DividerBlock` separators -- **Edit modal avatars** — the user mapping edit modal also displays the remote user's avatar -- **Back navigation** — "Back to Home" button returns to the main Home tab view -- **Avatar caching** — `_avatar_lookup` fetches and caches profile photo URLs from the remote workspace - -### 39. 
Code Refactoring — Module Split & Package Structure (Completed) -- **Flattened `utils/` directory** — all modules moved to top-level packages under `syncbot/` (no more `utils/` nesting) -- **Split monolithic files** into focused packages: - - `helpers.py` → `helpers/` package (`core.py`, `slack_api.py`, `encryption.py`, `files.py`, `notifications.py`, `user_matching.py`, `workspace.py`, `oauth.py`, `_cache.py`) - - `handlers.py` → `handlers/` package (`messages.py`, `groups.py`, `group_manage.py`, `channel_sync.py`, `users.py`, `tokens.py`, `federation_cmds.py`, `sync.py`, `_common.py`) - - `builders.py` → `builders/` package (`home.py`, `channel_sync.py`, `user_mapping.py`, `sync.py`, `_common.py`) - - `federation.py` + `federation_api.py` → `federation/` package (`core.py`, `api.py`) -- **Renamed `logging_config.py` to `logger.py`** — shorter, clearer module name -- **Added `__init__.py` re-exports** — `helpers/__init__.py` and `handlers/__init__.py` re-export public APIs for clean imports -- **Updated `pyproject.toml`** — `ruff` `known-first-party` updated, `per-file-ignores` for `app.py` E402 - -### 40. Security Audit — Dependency Updates & Hardening (Completed) -- **Dependency updates** — updated `cryptography`, `urllib3`, `certifi`, `requests`, and `pillow` to latest versions -- **Path traversal prevention** — file name sanitization via `_safe_file_parts` strips non-alphanumeric characters from file IDs and extensions -- **PyMySQL SSL hardening** — explicit SSL context with `certifi` CA bundle, `check_hostname=True`, `PROTOCOL_TLS_CLIENT` -- **URL-escaped credentials** — database username and password are `urllib.parse.quote_plus`-escaped in the connection string -- **Silent exception logging** — replaced bare `except: pass` blocks with `contextlib.suppress` or proper logging - -### 41. 
Hardening & Performance Pass (Completed) -- **Critical bug fixes**: - - Fixed broken import: `_users_list_page` was imported from `helpers.slack_api` instead of `helpers.user_matching` where it's defined - - Fixed `str.format()` crash: messages containing literal curly braces (`{` or `}`) caused `KeyError`/`IndexError` in `apply_mentioned_users`; replaced with iterative `re.sub` using a lambda -- **Performance — Fernet caching**: Added `@functools.lru_cache(maxsize=2)` to `_get_fernet()` to cache the derived Fernet instance, eliminating 600,000 PBKDF2 iterations on every encrypt/decrypt call -- **Performance — `auth.test` consolidation**: Merged `get_own_bot_id` and `get_own_bot_user_id` into a single cached `_get_auth_info` call, halving Slack API round-trips for bot identity -- **Performance — `DbManager.count_records()`**: Added `SELECT COUNT(*)` method and replaced `len(find_records(...))` calls that were fetching all rows just to count them -- **Performance — module-level constants**: Moved `_PREFIXED_ACTIONS` tuple to module scope (avoids rebuilding on every request); cached `GetDBClass` column keys in a class-level `frozenset` -- **DoS — file download streaming**: All `requests.get` calls for files now use `stream=True` with 30s timeout, 8 KB chunks, and a 100 MB size cap -- **Media path** — single direct-upload path (download from Slack, re-upload via `files_upload_v2`); no runtime S3 or boto3 -- **DoS — input caps**: File attachments capped at 20 per event, mentions at 50 per message, federation user ingestion at 5,000 per request, federation images at 10 per message -- **DoS — federation body limit**: Local dev federation HTTP server enforces 1 MB max request body -- **DoS — connection pool safety**: `GLOBAL_ENGINE.dispose()` now only fires after all retries are exhausted, not on every transient failure (prevents disrupting other in-flight queries) -- **DoS — `decrypt_bot_token` reuse**: Eliminated duplicate `decrypt_bot_token` calls in the message edit 
handler -- **DRY — `_parse_private_metadata`**: Replaced 6 inline `import json; json.loads(private_metadata)` blocks across 4 handler files with a shared helper in `_common.py` -- **DRY — `_toggle_sync_status`**: Merged `handle_pause_sync` and `handle_resume_sync` (near-identical 60-line functions) into a single parameterized helper -- **DRY — `_activate_pairing_users`**: Extracted duplicated 30-line user directory refresh + seed + auto-match blocks from two pairing handlers -- **DRY — `_find_post_records`**: Extracted duplicated PostMeta query pattern (3 call sites) in `federation/api.py` -- **DRY — `_find_source_workspace_id`**: Extracted duplicated source-workspace lookup loop (5 call sites) in `messages.py` -- **DRY — user directory upsert**: Refactored `_refresh_user_directory` to call `_upsert_single_user_to_directory` instead of duplicating the upsert logic -- **DRY — `notify_admins_dm`**: Added optional `blocks` parameter for Block Kit support, consolidating the text-only and block DM paths -- **Lint clean**: All `ruff` checks pass with zero warnings - -### 42. 
Workspace Groups Refactor — Many-to-Many Collaboration (Completed) -- **Replaced 1-to-1 Workspace Pairings with many-to-many Workspace Groups** — workspaces can now create or join groups, and a single workspace can belong to multiple groups with different combinations of members -- **New database tables**: - - `workspace_groups` — group record with `name`, `invite_code`, `created_by_workspace_id`, `created_at` - - `workspace_group_members` — junction table with `group_id`, `workspace_id`, `joined_at`, `deleted_at` (soft-delete) -- **Removed `workspace_pairings` table** — all pairing logic replaced by group membership -- **Schema changes to `syncs`** — replaced `pairing_id` with `group_id` (FK to `workspace_groups`), added `sync_mode` (`direct` or `group`), `target_workspace_id` (for direct syncs), and `publisher_workspace_id` (controls unpublish rights) -- **Schema changes to `user_mappings`** — replaced `pairing_id` with `group_id` (FK to `workspace_groups`) -- **Two sync modes**: - - **Direct** — publish a channel 1-to-1 to a specific workspace in the group (behaves like legacy pairings) - - **Group-wide** — publish a channel for any group member to subscribe independently -- **Selective stop sync** — when a workspace stops syncing, only that workspace's `PostMeta` and `SyncChannel` records are deleted; other group members continue uninterrupted -- **Publisher-only unpublish** — only the workspace that originally published a channel can unpublish it; the `Sync` record persists until the publisher explicitly removes it -- **Invite code flow** — creating a group generates a `XXX-XXXX` invite code; any workspace can join by entering the code; any existing group member can accept join requests -- **User mapping scoped per group** — user matching operates per workspace pair within a group; remote users displayed as "Display Name (Workspace Name)" and sorted by normalized name -- **Home tab redesign** — groups displayed as sections with member lists, inline channel 
syncs, "Publish Channel" button per group (no separate group selection step), and "Leave Group" button -- **Federation integration** — federated connections now create `WorkspaceGroup` and `WorkspaceGroupMember` records (with `federated_workspace_id`) instead of `WorkspacePairing` records -- **Leave group with cleanup** — soft-deletes the membership, removes associated `PostMeta`/`SyncChannel` records, leaves channels, removes user mappings, notifies remaining members, and deletes the group if empty -- **New handler modules** — `handlers/groups.py` (create/join) and `handlers/group_manage.py` (leave) replace `handlers/pairing.py` and `handlers/pairing_manage.py` -- **Removed modules** — `handlers/pairing.py`, `handlers/pairing_manage.py`, `builders/pairing.py` -- **Updated tests** — renamed test classes and methods to group terminology; updated action ID constants - -### 43. Block Kit Shorthand & UI Polish (Completed) -- **Block Kit shorthand** — builders and handlers use `slack.blocks` helpers (`header`, `divider`, `context`, `section`, `button`, `actions`) instead of verbose `orm.*Block` constructors where applicable; `section` alias for section-style blocks in `slack/blocks.py` -- **Parameter shadowing** — in modules that take a `context` (request/Bolt) parameter, the blocks context helper is imported as `block_context` to avoid shadowing (e.g. `builders/home.py`, `builders/user_mapping.py`) -- **Synced Channels display** — Home tab Synced Channels rows no longer show the remote channel link; each row shows the local channel plus bracketed workspace list including the local workspace (e.g. 
_[Any: Sprocket Dev, Sprocket Dev Beta]_) -- **Deactivated/deleted users** — `UserDirectory` has `deleted_at`; deactivated users are soft-deleted and mappings purged; users no longer in `users.list` are hard-deleted; mapping UI, edit modal, and federation export filter out deleted users -- **Mapped display names** — synced messages in the target workspace use the mapped local user's name and icon when available; otherwise source name/icon with workspace indicator -- **Display name normalization** — `normalize_display_name()` used in user mapping UI and synced message display; user mapping screen shows "Display Name (Workspace)" with normalized names - -### 44. Home and User Mapping Refresh — Performance & Cost (Completed) -- **Content hash** — Home tab and User Mapping Refresh handlers compute a stable hash from minimal DB queries (groups, members, syncs, pending invites; for User Mapping, mapping ids/methods). When the hash matches the last full refresh, the app skips the expensive path (no N× `team_info`, no directory refresh, no full rebuild). -- **Cached built blocks** — After a full refresh, the built Block Kit payload is cached (in-process, keyed by team/user and optionally group for User Mapping). When the hash matches, the app re-publishes that cached view with one `views.publish` instead of re-running all DB and Slack calls. -- **60-second cooldown** — If the user clicks Refresh again within 60 seconds and the hash is unchanged, the app re-publishes the cached view with a context message: "No new data. Wait __ seconds before refreshing again." The displayed seconds are the current remaining time from the last refresh (recomputed on each click). Cooldown constant: `REFRESH_COOLDOWN_SECONDS` (default 60) in `constants.py`. 
-- **Request-scoped caching** — `get_workspace_by_id(workspace_id, context=None)` and `get_admin_ids(client, team_id=None, context=None)` use the request `context` dict when provided: one DB read per distinct workspace, one `users.list` per distinct team per request. Reduces duplicate lookups when building the Home tab or when multiple workspaces' Home tabs are refreshed in one invocation. -- **Context isolation for cross-workspace refreshes** — When a change in one workspace triggers Home tab refreshes in other group members, `context=None` is passed to `refresh_home_tab_for_workspace` to prevent the acting workspace's request-scoped cache (bot token, admin IDs) from leaking into other workspaces' refresh paths. The acting workspace's own refresh still uses `context=context`. -- **User Mapping Refresh** — Same pattern applied to the User Mapping screen: content hash, cached blocks, 60s cooldown with message, and `build_user_mapping_screen(..., context=..., return_blocks=True)` for caching. Request-scoped `get_workspace_by_id` used when building the screen. - -### 45. Backup, Restore, and Data Migration (Completed) -- **Slack UI** — Home tab has **Backup/Restore** (next to Refresh) and **Data Migration** (in External Connections when federation is enabled). Modals for download backup, restore from JSON, export workspace data, and import migration file; confirmation modals when HMAC or encryption-key/signature checks fail with option to proceed anyway. -- **Full-instance backup** — All tables exported as JSON with `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `TOKEN_ENCRYPTION_KEY`), and HMAC over canonical JSON. Restore inserts in FK order; intended for empty/fresh DB (e.g. after AWS rebuild). On HMAC or encryption-key mismatch, payload stored in cache and confirmation modal pushed; after restore, Home tab caches invalidated for all workspaces. 
-- **Workspace migration export/import** — Export produces workspace-scoped JSON (syncs, sync channels, post meta, user directory, user mappings) with optional `source_instance` (webhook_url, instance_id, public_key, one-time connection code). Ed25519 signature for tampering detection. Import verifies signature, resolves or creates federated group (using `source_instance` when present), replace mode (remove then create SyncChannels/PostMeta/user_directory/user_mappings), optional tampering confirmation; Home tab and sync-list caches invalidated after import. -- **Instance A detection** — Federated pair request accepts optional `team_id` and `workspace_name`; stored as `primary_team_id` and `primary_workspace_name` on `federated_workspaces`. If a local workspace with that `team_id` exists, it is soft-deleted so the federated connection is the only representation of that workspace on the instance. - -### 46. Code Quality & Documentation Restructure (Completed) -- **Database reset via UI** — Renamed `DANGER_DROP_AND_INIT_DB` (auto-drop on startup) to `ENABLE_DB_RESET` (boolean env var). When enabled, a red "Reset Database" button appears in a "Danger Zone" section at the bottom of the Home tab. Clicking it opens a confirmation modal; confirming drops and reinitializes the database via Alembic, clears all caches, and publishes a confirmation message. No longer runs automatically on startup. 
-- **Variable naming convention audit** — Standardized variable names across 14 files to align with the domain model: - - `partner` / `p_ws` / `p_ch` / `p_client` → `member_ws` / `sync_channel` / `member_client` (maps to `workspace_group_members` table) - - `sc` (SyncChannel) → `sync_channel`; `ch` (ambiguous) → `sync_channel` or `slack_channel` depending on type - - `pm` → `post_meta` (PostMeta) or `pending_member` (WorkspaceGroupMember) to resolve ambiguity - - `fm` → `fed_member`; `pw` → `pending_ws` or `publisher_ws`; `och` → `other_channel` - - `m` in multi-line loops → `member`, `membership`, or `fed_member` as appropriate - - All log messages and comments updated to match -- **Naming convention established** — `_SCREAMING_CASE` for private module-level constants (true constants set once at import time); `_lowercase` for private functions, mutable state, and implementation-detail values; no-prefix `SCREAMING_CASE` for public constants -- **Cross-workspace context bug fix** — Fixed all handlers that were passing the acting workspace's `context` dict into other group members' Home tab refreshes. The `context` contains workspace-specific state (bot token, admin ID cache) that could contaminate other workspaces' builds. Now `context=None` for all cross-workspace refreshes. -- **README restructured** — Reduced README from ~580 lines to ~220 lines, keeping only install/deploy/run instructions. Moved end-user guide, backup/migration, CI/CD, shared infrastructure, and API reference into `docs/` folder (`USER_GUIDE.md`, `BACKUP_AND_MIGRATION.md`, `DEPLOYMENT.md`, `API_REFERENCE.md`). -- **Documentation consistency** — Updated `IMPROVEMENTS.md` and all doc files to use new domain terminology (group members instead of partners). - -### 47. OAuth on MySQL; Remove Runtime S3 and HEIC (Completed) -- **OAuth in RDS** — Slack OAuth state and installation data are stored in the same MySQL database via `SQLAlchemyInstallationStore` and `SQLAlchemyOAuthStateStore`. 
One code path for local dev and production; no file-based or S3-backed OAuth stores. -- **No runtime S3** — Removed all runtime S3 usage: OAuth buckets and image bucket resources, Lambda S3 policies, and env vars. Media is uploaded directly to each target Slack channel via `files_upload_v2`. SAM deploy still uses an S3 artifact bucket for packaging only. -- **HEIC and Pillow removed** — HEIC-to-PNG conversion and `upload_photos` (S3) were removed; direct upload is the only media path. Dropped `pillow` and `pillow-heif` from dependencies. -- **Template and docs** — `infra/aws/template.yaml` no longer creates OAuth or image buckets; README, DEPLOYMENT, ARCHITECTURE, USER_GUIDE, `.env.example`, and IMPROVEMENTS updated to describe MySQL OAuth and artifact-bucket-only S3. - -### 48. Infra Contract + Pre-Release DB Abstraction (Completed) -- **Contract rename for clarity** — renamed deployment contract docs to **Infrastructure Contract** (`docs/INFRA_CONTRACT.md`) and updated all references across docs, workflows, and infra comments. -- **Backend-neutral DB runtime contract** — standardized on `DATABASE_BACKEND`, `DATABASE_URL`, and `DATABASE_*` runtime names (`DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`) across app, tests, infra, and docs. -- **SQLite-capable runtime path** — app startup and reset flows are Alembic-driven and dialect-aware (MySQL + SQLite), with fresh-install assumptions for this pre-release. -- **Token key naming cleanup** — renamed `PASSWORD_ENCRYPT_KEY` to `TOKEN_ENCRYPTION_KEY` everywhere (code, tests, infra, workflows, and docs). -- **Generate-once token key** — cloud deploy paths now generate `TOKEN_ENCRYPTION_KEY` once and persist it in provider secret manager by default. 
-- **Disaster recovery override** — added explicit key reuse overrides for rebuild scenarios: - - AWS SAM parameter: `TokenEncryptionKeyOverride` - - GCP Terraform variable: `token_encryption_key_override` -- **Admin/operator warning surface** — deploy helper scripts and deployment docs now explicitly warn that losing the token key requires workspace reinstall/re-authorization. - -### 49. PostgreSQL Parallel Backend (Completed) -- **Runtime** — Added `DATABASE_BACKEND=postgresql`, `psycopg2` + `postgresql+psycopg2://` URLs, `DATABASE_PORT` (default 5432), PostgreSQL-safe `CREATE DATABASE`, table drop/reset, and TLS via `sslmode`/`sslrootcert`. -- **AWS SAM** — `DatabaseEngine` parameter (MySQL default, PostgreSQL supported); split `RDSInstanceMysql` / `RDSInstancePostgres`; Lambda env sets `DATABASE_BACKEND`, `DATABASE_PORT`, and `DATABASE_HOST` accordingly. -- **Custom resource** — `infra/aws/db_setup/handler.py` branches on `DatabaseEngine` for MySQL vs PostgreSQL user/database creation. -- **Deploy UX** — `./infra/aws/scripts/deploy.sh` supports both engines with a numbered choice prompt. -- **Docs/tests** — `INFRA_CONTRACT.md`, `DEPLOYMENT.md`, `README.md`, `.env.example` updated; `tests/conftest.py` defaults tests to `mysql` for compatibility; added PostgreSQL pool/required-vars tests and `tests/test_db_setup.py`. - -## Remaining Recommendations - -### Low Priority - -1. **Dependencies** - - Keep dependency pins current with regular lock refreshes and security audits - - Review major-version upgrades for Slack SDK/Bolt and provider tooling on a planned cadence - -2. **Database Migrations** - - Startup now bootstraps schema via Alembic (`alembic upgrade head`) for fresh installs. - - Continue using Alembic revisions for schema changes and add DB integration coverage as schema evolves. - -3. 
**Advanced Testing** - - Add integration tests for database operations - - Add tests for Slack API interactions (using mocks for full handler flows) - - Add end-to-end sync workflow tests - -## Notes - -- The codebase is organized into focused packages (`handlers/`, `builders/`, `helpers/`, `federation/`, `db/`, `slack/`) with clear separation of concerns -- The routing system using mappers is clean and maintainable -- Database layer benefits from connection pooling, automatic retry with safe disposal, and `SELECT COUNT(*)` for counting -- All Slack API calls have rate-limit handling with exponential backoff -- Error isolation in sync loops ensures partial failures don't cascade -- The pytest suite covers core helper functions, encryption, caching, event parsing, bot filtering, invite codes, DB behavior, and sync creation -- Structured JSON logging with correlation IDs enables fast CloudWatch Logs Insights queries -- Pre-commit hooks enforce consistent code style on every commit -- Admin/owner authorization enforced on all configuration actions with defense-in-depth -- Cross-workspace user matching resolves @mentions persistently with email, name, and manual matching (scoped per group) -- Bot messages from third-party bots are synced with proper attribution; only SyncBot's own messages are filtered -- Workspace Groups support many-to-many collaboration with invite codes, ensuring syncs are only established between explicitly trusted workspaces -- Channel sync supports both direct (1-to-1) and group-wide publish modes -- User matching auto-runs on group join; unmatchable users are filtered; matched users have inline override dropdowns -- Dev Container and Docker Compose configs provide zero-install local development with live editing -- Reactions, images, videos, and GIFs are all synced bidirectionally with proper user attribution -- Individual syncs can be paused, resumed, and stopped with selective history cleanup and publisher-only unpublish -- User profile 
changes (display name, email) are detected automatically and trigger mapping re-evaluation -- SyncBot self-removes from unconfigured channels with a friendly message -- All foreign key relationships use `ON DELETE CASCADE` for clean data removal -- File downloads are streamed with timeouts and size caps to prevent DoS -- Fernet key derivation is cached for performance; bot identity is resolved in a single API call -- Duplicated code has been consolidated into shared helpers throughout handlers and federation modules -- Home and User Mapping Refresh buttons use content hash, cached blocks, and a 60s cooldown to minimize RDS and Slack API usage when nothing has changed; request-scoped caching keeps builds lightweight, and cross-workspace refreshes use `context=None` to prevent cache contamination -- Variable naming follows a consistent domain-model convention: `member_ws`/`member_client` for group members, `sync_channel` for ORM records, `slack_channel` for raw API dicts -- Schema bootstrap + migration application is automatic at startup via Alembic (`alembic upgrade head`) diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 9be77b4..baee98f 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -4,7 +4,7 @@ This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) m **Deploy entrypoint:** From the repo root, `./deploy.sh` (macOS/Linux, or Git Bash/WSL bash) or `.\deploy.ps1` (Windows PowerShell — finds Git Bash or WSL, then bash) runs an interactive helper that delegates to `infra//scripts/deploy.sh`. After identity/auth prompts, each provider script shows a **Deploy Tasks** menu (comma-separated numbers, default all): bootstrap (AWS only), build/deploy, CI/CD (GitHub Actions), Slack API configuration, and DR backup secret output—so operators can run subsets (e.g. CI/CD only against an existing stack) without mid-flow surprises. 
That flow sets Cloud/Terraform resources and runtime env vars consistent with this document. Step-by-step and manual alternatives: [DEPLOYMENT.md](DEPLOYMENT.md). -**Pre-release:** This repo is pre-release. Database rollout assumes **fresh installs only** (no legacy schema migration or stamping). New databases are initialized via Alembic `upgrade head` at startup. +**Schema:** The database schema is managed by **Alembic**. On startup the app runs **`alembic upgrade head`** so new and existing databases stay current with the latest migrations. ## Runtime Environment Variables diff --git a/pyproject.toml b/pyproject.toml index 7ea7a32..9c9d473 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,8 @@ [tool.poetry] name = "syncbot" -version = "0.1.0" -description = "" -authors = ["Klint Van Tassel ", "Evan Petzoldt "] +version = "1.0.0" +description = "Slack app for syncing messages and threads across workspaces." +authors = ["Evan Petzoldt ", "Klint Van Tassel "] readme = "README.md" [tool.poetry.requires-plugins] diff --git a/syncbot/app.py b/syncbot/app.py index ecd80f3..c7916c7 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -17,9 +17,15 @@ import logging import os import re +from importlib.metadata import PackageNotFoundError, version from dotenv import load_dotenv +try: + __version__ = version("syncbot") +except PackageNotFoundError: + __version__ = "dev" + # Load .env before any other app imports so env vars are available everywhere. # In production (Lambda) there is no .env file and this is a harmless no-op. 
load_dotenv(os.path.join(os.path.dirname(__file__), "..", ".env")) diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index ad37a9b..b82f768 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -200,7 +200,7 @@ def _alembic_config(): def _run_alembic_upgrade() -> None: - """Run Alembic upgrade head (fresh-install flow only; pre-release).""" + """Run Alembic upgrade head to apply pending migrations.""" from alembic import command # pyright: ignore[reportMissingImports] config = _alembic_config() @@ -208,9 +208,9 @@ def _run_alembic_upgrade() -> None: def initialize_database() -> None: - """Initialize schema via Alembic migrations (fresh install only; pre-release). + """Ensure the database exists (MySQL/PostgreSQL) and apply Alembic migrations. - Ensures DB exists (MySQL/PostgreSQL), then runs Alembic upgrade head. + Runs ``alembic upgrade head`` so the schema matches the current revision. """ for attempt in range(1, _DB_INIT_MAX_ATTEMPTS + 1): try: diff --git a/syncbot/slack/orm.py b/syncbot/slack/orm.py index 23d8515..a8c2ed0 100644 --- a/syncbot/slack/orm.py +++ b/syncbot/slack/orm.py @@ -16,8 +16,8 @@ class BaseElement: def make_placeholder_field(self): return {"placeholder": {"type": "plain_text", "text": self.placeholder, "emoji": True}} - def get_selected_value(): - return "Not yet implemented" + def get_selected_value(self, input_data, action): + raise NotImplementedError @dataclass @@ -33,7 +33,7 @@ def as_form_field(self, initial_value=None): raise Exception("Not Implemented") def get_selected_value(self, input_data, action): - return "Not yet implemented" + raise NotImplementedError @dataclass @@ -148,9 +148,6 @@ class StaticSelectElement(BaseElement): initial_value: str = None options: list[SelectorOption] = None - # def with_options(self, options: List[SelectorOption]): - # return SelectorElement(self.label, self.action, options) - def as_form_field(self, action: str): if not self.options: self.options = 
as_selector_options(["Default"]) From ddc382e3610a8752a70ca7baad8765de3ecce2a6 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 21:42:53 -0500 Subject: [PATCH 32/45] Improvements to GCP deploys. --- Dockerfile | 6 +- docs/INFRA_CONTRACT.md | 6 +- infra/gcp/README.md | 6 +- infra/gcp/main.tf | 3 + syncbot/app.py | 195 ++++++++++++++++++++++------ syncbot/constants.py | 19 ++- syncbot/db/__init__.py | 24 ++-- tests/test_container_http_server.py | 54 ++++++++ 8 files changed, 256 insertions(+), 57 deletions(-) create mode 100644 tests/test_container_http_server.py diff --git a/Dockerfile b/Dockerfile index 7f81e73..dc365f2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,14 +13,14 @@ RUN apt-get update && \ # Install runtime dependencies from pinned requirements. COPY syncbot/requirements.txt /app/requirements.txt RUN python -m pip install --no-cache-dir --upgrade pip && \ - pip install --no-cache-dir -r /app/requirements.txt && \ - pip install --no-cache-dir boto3 + pip install --no-cache-dir -r /app/requirements.txt # Copy application code COPY syncbot/ ./syncbot/ WORKDIR /app/syncbot -EXPOSE 3000 +# Cloud Run sets PORT (default 8080); local dev may use 3000. +EXPOSE 8080 CMD ["python", "app.py"] diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index baee98f..76bf975 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -37,7 +37,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | | `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | | `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. 
| -| `DATABASE_SSL_CA_PATH` | Optional CA bundle path when TLS is enabled (default `/etc/pki/tls/certs/ca-bundle.crt`). | +| `DATABASE_SSL_CA_PATH` | Optional CA bundle path when TLS is enabled. If unset, the app uses the first existing file among common OS locations (Amazon Linux, Debian, Alpine); PostgreSQL omits `sslrootcert` when none exist so libpq uses the system trust store. | **SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. Single-writer; suitable for small teams and dev. @@ -67,6 +67,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | `ENABLE_DB_RESET` | When set to a Slack Team ID, enables the Reset Database button for that workspace. | | `LOCAL_DEVELOPMENT` | `true` only for local dev; disables token verification and enables dev shortcuts. | | `LOG_LEVEL` | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default `INFO`). | +| `PORT` | HTTP listen port for container entrypoint (`python app.py` / Cloud Run). Cloud Run injects this (typically `8080`); default `3000` when unset. | | `SOFT_DELETE_RETENTION_DAYS` | Days to retain soft-deleted workspace data (default `30`). | | `SYNCBOT_FEDERATION_ENABLED` | `true` to enable external connections (federation). | | `SYNCBOT_INSTANCE_ID` | UUID for this instance (optional; can be auto-generated). | @@ -80,7 +81,8 @@ The provider must deliver: Slack sends events and interactivity to a single base URL. The app expects: - `POST /slack/events` — events and actions - `GET /slack/install` — OAuth start - - `GET /slack/oauth_redirect` — OAuth callback + - `GET /slack/oauth_redirect` — OAuth callback + - `GET /health` — liveness (JSON `{"status":"ok"}`) for keep-warm probes Any path under `/api/federation` is used for federation when enabled. 2. 
**Secret injection** diff --git a/infra/gcp/README.md b/infra/gcp/README.md index 8117eac..0d37b14 100644 --- a/infra/gcp/README.md +++ b/infra/gcp/README.md @@ -70,7 +70,11 @@ Use the [GCP bootstrap output script](scripts/print-bootstrap-outputs.sh) to pri ## Keep-warm -If `enable_keep_warm` is `true`, a Cloud Scheduler job pings the service at `/health` on the configured interval. Ensure your app exposes a `/health` endpoint or change the job target in [main.tf](main.tf) to another path (e.g. `/`). +If `enable_keep_warm` is `true`, a Cloud Scheduler job pings the service at `/health` on the configured interval. The app implements `GET /health` (JSON `{"status":"ok"}`). + +## HTTP port + +Cloud Run sets the `PORT` environment variable (default `8080`). The container entrypoint (`python app.py`) listens on `PORT`, falling back to `3000` when unset (local Docker). ## Security diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index 105ec77..ac76934 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -265,6 +265,9 @@ resource "google_cloud_run_v2_service" "syncbot" { template { service_account = google_service_account.cloud_run.email + # Lambda-like single request per container (free-tier friendly; matches app pool sizing). + max_instance_request_concurrency = 1 + scaling { min_instance_count = var.cloud_run_min_instances max_instance_count = var.cloud_run_max_instances diff --git a/syncbot/app.py b/syncbot/app.py index c7916c7..d69efc3 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -1,7 +1,8 @@ """SyncBot — Slack app that syncs messages across workspaces. This module is the entry point for both AWS Lambda (via :func:`handler`) and -local development (``python app.py`` starts a Bolt dev server on port 3000). +container/local HTTP mode (``python app.py`` / Cloud Run: listens on :envvar:`PORT` +or port 3000 by default). All incoming Slack events, actions, view submissions, and slash commands are dispatched through :func:`main_response`. 
In production (non-local), view @@ -30,8 +31,13 @@ # In production (Lambda) there is no .env file and this is a harmless no-op. load_dotenv(os.path.join(os.path.dirname(__file__), "..", ".env")) +from http.server import BaseHTTPRequestHandler, HTTPServer + from slack_bolt import App from slack_bolt.adapter.aws_lambda import SlackRequestHandler +from slack_bolt.request import BoltRequest +from slack_bolt.response import BoltResponse +from slack_bolt.util.utils import get_boot_message from constants import ( FEDERATION_ENABLED, @@ -237,50 +243,157 @@ def main_response(body: dict, logger, client, ack, context: dict) -> None: app.view(MATCH_ALL_PATTERN)(ack=view_ack, lazy=[main_response]) -if __name__ == "__main__": - if LOCAL_DEVELOPMENT: - import threading - from http.server import BaseHTTPRequestHandler, HTTPServer - - class FederationHTTPHandler(BaseHTTPRequestHandler): - """Lightweight HTTP handler for federation API endpoints.""" - - def do_GET(self): - if self.path.startswith("/api/federation"): - self._handle_federation("GET") - else: - self.send_error(404) - - def do_POST(self): - if self.path.startswith("/api/federation"): - self._handle_federation("POST") - else: - self.send_error(404) +def _http_listen_port() -> int: + """Port for Bolt container mode (Cloud Run sets ``PORT``; local default 3000).""" + raw = os.environ.get("PORT", "3000").strip() + try: + return int(raw) + except ValueError: + return 3000 - _MAX_BODY = 1_048_576 # 1 MB - def _handle_federation(self, method: str): - try: - content_len = min(int(self.headers.get("Content-Length", 0)), self._MAX_BODY) - except (TypeError, ValueError): - content_len = 0 - body_str = self.rfile.read(content_len).decode() if content_len else "" - headers = {k: v for k, v in self.headers.items()} +def run_syncbot_http_server( + *, + port: int | None = None, + bolt_path: str = "/slack/events", + http_server_logger_enabled: bool = True, +) -> None: + """Start the HTTP server used by Cloud Run and ``python app.py``. 
- status, resp = dispatch_federation_request(method, self.path, body_str, headers) + Serves Slack (``bolt_path``), OAuth install/callback, ``/health``, and + ``/api/federation/*`` when :data:`~constants.FEDERATION_ENABLED` is true. + Mirrors :class:`slack_bolt.app.app.SlackAppDevelopmentServer` routing with + extra paths for production parity with API Gateway + Lambda. + """ + listen_port = port if port is not None else _http_listen_port() + _bolt_app = app + _bolt_oauth_flow = app.oauth_flow + _bolt_endpoint_path = bolt_path + _fed_enabled = FEDERATION_ENABLED + _http_log = http_server_logger_enabled + _fed_max_body = 1_048_576 # 1 MB + + class SyncBotHTTPHandler(BaseHTTPRequestHandler): + def log_message(self, fmt: str, *args) -> None: + if _http_log: + super().log_message(fmt, *args) + + def _path_no_query(self) -> str: + return self.path.partition("?")[0] + + def _send_raw( + self, + status: int, + headers: dict[str, list[str]], + body: str | bytes = "", + ) -> None: + if isinstance(body, str): + body_bytes = body.encode("utf-8") + else: + body_bytes = body + self.send_response(status) + for k, vs in headers.items(): + for v in vs: + self.send_header(k, v) + self.send_header("Content-Length", str(len(body_bytes))) + self.end_headers() + self.wfile.write(body_bytes) + + def _send_bolt_response(self, bolt_resp: BoltResponse) -> None: + self._send_raw( + status=bolt_resp.status, + headers={k: list(vs) for k, vs in bolt_resp.headers.items()}, + body=bolt_resp.body, + ) - self.send_response(status) - self.send_header("Content-Type", "application/json") - self.end_headers() - self.wfile.write(json.dumps(resp).encode()) + def do_GET(self) -> None: + path = self._path_no_query() + if path == "/health": + self._send_raw( + 200, + {"Content-Type": ["application/json"]}, + json.dumps({"status": "ok"}), + ) + return + if _fed_enabled and path.startswith("/api/federation"): + self._handle_federation("GET") + return + if _bolt_oauth_flow: + query = 
self.path.partition("?")[2] + if path == _bolt_oauth_flow.install_path: + bolt_req = BoltRequest( + body="", + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_oauth_flow.handle_installation(bolt_req) + self._send_bolt_response(bolt_resp) + return + if path == _bolt_oauth_flow.redirect_uri_path: + bolt_req = BoltRequest( + body="", + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_oauth_flow.handle_callback(bolt_req) + self._send_bolt_response(bolt_resp) + return + self._send_raw(404, {}) + + def do_POST(self) -> None: + path = self._path_no_query() + if _fed_enabled and path.startswith("/api/federation"): + self._handle_federation("POST") + return + if path != _bolt_endpoint_path: + self._send_raw(404, {}) + return + try: + content_len = int(self.headers.get("Content-Length") or 0) + except (TypeError, ValueError): + content_len = 0 + query = self.path.partition("?")[2] + request_body = self.rfile.read(content_len).decode("utf-8") + bolt_req = BoltRequest( + body=request_body, + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_app.dispatch(bolt_req) + self._send_bolt_response(bolt_resp) + + def _handle_federation(self, method: str) -> None: + try: + content_len = min( + int(self.headers.get("Content-Length", 0)), + _fed_max_body, + ) + except (TypeError, ValueError): + content_len = 0 + body_str = self.rfile.read(content_len).decode() if content_len else "" + headers = {k: v for k, v in self.headers.items()} + status, resp = dispatch_federation_request( + method, self._path_no_query(), body_str, headers + ) + self._send_raw( + status, + {"Content-Type": ["application/json"]}, + json.dumps(resp), + ) - def log_message(self, format, *args): - pass + server = HTTPServer(("0.0.0.0", listen_port), SyncBotHTTPHandler) + if _bolt_app.logger.level > logging.INFO: + print(get_boot_message(development_server=True)) + else: + _bolt_app.logger.info( + "http_server_started", + extra={"port": listen_port, "bolt_path": bolt_path}, + ) + try: 
+ server.serve_forever(0.05) + finally: + server.server_close() - if FEDERATION_ENABLED: - fed_server = HTTPServer(("0.0.0.0", 3001), FederationHTTPHandler) - fed_thread = threading.Thread(target=fed_server.serve_forever, daemon=True) - fed_thread.start() - _logger.info("Federation API server started on port 3001") - app.start(3000) +if __name__ == "__main__": + run_syncbot_http_server(http_server_logger_enabled=LOCAL_DEVELOPMENT) diff --git a/syncbot/constants.py b/syncbot/constants.py index 29394e9..826ba65 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -127,8 +127,23 @@ def database_tls_enabled() -> bool: def database_ssl_ca_path() -> str: - """Return optional CA bundle path for DB TLS verification.""" - return os.environ.get(DATABASE_SSL_CA_PATH, "/etc/pki/tls/certs/ca-bundle.crt") + """Return CA bundle path for DB TLS verification, or empty string for system defaults. + + If :envvar:`DATABASE_SSL_CA_PATH` is set, that path is returned as-is (caller may + verify it exists). Otherwise the first existing file among common OS locations + is used (Amazon Linux, Debian, Alpine). 
+ """ + explicit = os.environ.get(DATABASE_SSL_CA_PATH, "").strip() + if explicit: + return explicit + for candidate in ( + "/etc/pki/tls/certs/ca-bundle.crt", # RHEL / Amazon Linux / Lambda + "/etc/ssl/certs/ca-certificates.crt", # Debian / Ubuntu / Cloud Run image + "/etc/ssl/cert.pem", # Alpine / macOS + ): + if os.path.isfile(candidate): + return candidate + return "" def get_required_db_vars() -> list: diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index b82f768..b9cf451 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -73,9 +73,12 @@ def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: connect_args: dict = {} if constants.database_tls_enabled(): ca_path = constants.database_ssl_ca_path() - try: - ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): + if ca_path: + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + else: ssl_ctx = ssl.create_default_context() connect_args["ssl"] = ssl_ctx return db_url, connect_args @@ -95,7 +98,8 @@ def _build_postgresql_url(include_schema: bool = False) -> tuple[str, dict]: if constants.database_tls_enabled(): ca_path = constants.database_ssl_ca_path() connect_args["sslmode"] = "verify-full" - connect_args["sslrootcert"] = ca_path + if ca_path and os.path.isfile(ca_path): + connect_args["sslrootcert"] = ca_path return db_url, connect_args @@ -107,14 +111,18 @@ def _network_sql_connect_args_from_url() -> dict: backend = constants.get_database_backend() ca_path = constants.database_ssl_ca_path() if backend == "mysql": - try: - ssl_ctx = ssl.create_default_context(cafile=ca_path) - except (OSError, ssl.SSLError): + if ca_path: + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + else: ssl_ctx = ssl.create_default_context() connect_args["ssl"] = ssl_ctx elif backend == 
"postgresql": connect_args["sslmode"] = "verify-full" - connect_args["sslrootcert"] = ca_path + if ca_path and os.path.isfile(ca_path): + connect_args["sslrootcert"] = ca_path return connect_args diff --git a/tests/test_container_http_server.py b/tests/test_container_http_server.py new file mode 100644 index 0000000..6de9b22 --- /dev/null +++ b/tests/test_container_http_server.py @@ -0,0 +1,54 @@ +"""Tests for Cloud Run / container HTTP server helpers in ``app``.""" + +import json +import os +import socket +import threading +import time +import urllib.error +import urllib.request +from unittest.mock import patch + +import pytest + + +def test_http_listen_port_from_env() -> None: + from app import _http_listen_port + + with patch.dict(os.environ, {"PORT": "8080"}): + assert _http_listen_port() == 8080 + + +def test_http_listen_port_invalid_falls_back() -> None: + from app import _http_listen_port + + with patch.dict(os.environ, {"PORT": "nope"}): + assert _http_listen_port() == 3000 + + +def test_health_endpoint_on_container_server() -> None: + """GET ``/health`` returns 200 and JSON (same server path as Cloud Run).""" + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(("127.0.0.1", 0)) + port = sock.getsockname()[1] + sock.close() + + def serve() -> None: + from app import run_syncbot_http_server + + run_syncbot_http_server(port=port, http_server_logger_enabled=False) + + threading.Thread(target=serve, daemon=True).start() + + url = f"http://127.0.0.1:{port}/health" + last_err: BaseException | None = None + for _ in range(100): + try: + with urllib.request.urlopen(url, timeout=0.3) as r: + assert r.status == 200 + assert json.loads(r.read().decode()) == {"status": "ok"} + return + except (urllib.error.URLError, OSError) as e: + last_err = e + time.sleep(0.05) + pytest.fail(f"/health never became ready: {last_err!r}") From 079b53523ecee264b96a74f7aa9db6783cec6f53 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 22:25:02 
-0500 Subject: [PATCH 33/45] Fix to syncing messages with photos. --- syncbot/handlers/channel_sync.py | 25 ++++++++ syncbot/handlers/messages.py | 72 ++++++++++++++++++++-- syncbot/handlers/sync.py | 22 +++++++ syncbot/helpers/slack_api.py | 10 ++- syncbot/helpers/workspace.py | 11 ++++ tests/test_channel_sync_handlers.py | 22 +++++++ tests/test_file_upload_attribution.py | 69 +++++++++++++++++++++ tests/test_message_sync.py | 87 +++++++++++++++++++++++++++ 8 files changed, 313 insertions(+), 5 deletions(-) create mode 100644 tests/test_file_upload_attribution.py create mode 100644 tests/test_message_sync.py diff --git a/syncbot/handlers/channel_sync.py b/syncbot/handlers/channel_sync.py index fefa1f8..d9fe8b4 100644 --- a/syncbot/handlers/channel_sync.py +++ b/syncbot/handlers/channel_sync.py @@ -766,6 +766,31 @@ def handle_subscribe_channel_submit( return group_id = sync_record.group_id + + existing_sub = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_id, + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + if existing_sub: + _logger.info( + "subscribe_channel_duplicate_skip", + extra={ + "sync_id": sync_id, + "channel_id": channel_id, + "workspace_id": workspace_record.id, + }, + ) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + if group_id: + _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context) + return + acting_user_id = helpers.safe_get(body, "user", "id") or user_id admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index fb83185..b893794 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -26,6 +26,32 @@ def _find_source_workspace_id(records: list[tuple], 
channel_id: str, ws_index: i _logger = logging.getLogger(__name__) +def _shared_by_file_initial_comment( + *, + user_id: str, + source_workspace_id: int, + target_workspace_id: int, + name_for_target: str, + target_client: WebClient, + channel_id: str, + text_message_ts: str | None, +) -> str: + """Build ``initial_comment`` for ``files_upload_v2`` (mention + optional permalink to text).""" + mapped_id = helpers.get_mapped_target_user_id(user_id or "", source_workspace_id or 0, target_workspace_id) + user_ref = f"<@{mapped_id}>" if mapped_id else name_for_target + if not text_message_ts: + return f"Shared by {user_ref}" + permalink = None + try: + plink_resp = target_client.chat_getPermalink(channel=channel_id, message_ts=text_message_ts) + permalink = helpers.safe_get(plink_resp, "permalink") + except Exception: + pass + if permalink: + return f"Shared by {user_ref} in <{permalink}|this message>" + return f"Shared by {user_ref}" + + def _parse_event_fields(body: dict, client: WebClient) -> EventContext: """Extract the common fields every message handler needs.""" event: dict = body.get("event", {}) @@ -164,7 +190,6 @@ def _handle_new_post( user_name, user_profile_url = helpers.get_bot_info_from_event(body) workspace_name = _get_workspace_name(sync_records, channel_id, workspace_index=1) - posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)" post_uuid = uuid.uuid4().hex post_list: list[schemas.PostMeta] = [] @@ -229,11 +254,20 @@ def _handle_new_post( name_for_target = target_display_name or user_name or "Someone" if direct_files and not msg_text.strip(): + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=None, + ) _, file_ts = helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, 
files=direct_files, - initial_comment=f"Shared by {name_for_target} {posted_from}", + initial_comment=file_comment, ) ts = file_ts or helpers.safe_get(body, "event", "ts") else: @@ -249,11 +283,22 @@ def _handle_new_post( ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") if direct_files: + text_ts = str(ts) if ts else None + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=text_ts, + ) helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, files=direct_files, thread_ts=ts, + initial_comment=file_comment, ) if ts: @@ -293,7 +338,6 @@ def _handle_thread_reply( return workspace_name = _get_workspace_name(post_records, channel_id, workspace_index=2) - posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)" if user_id: user_name, user_profile_url = helpers.get_user_info(client, user_id) @@ -357,11 +401,20 @@ def _handle_thread_reply( name_for_target = target_display_name or user_name or "Someone" if direct_files and not msg_text.strip(): + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=None, + ) _, file_ts = helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, files=direct_files, - initial_comment=f"Shared by {name_for_target} {posted_from}", + initial_comment=file_comment, thread_ts=parent_ts, ) ts = file_ts or helpers.safe_get(body, "event", "ts") @@ -379,11 +432,22 @@ def _handle_thread_reply( ts = helpers.safe_get(res, "ts") if direct_files: + text_ts = str(ts) if ts else None + file_comment = 
_shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=text_ts, + ) helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, files=direct_files, thread_ts=parent_ts, + initial_comment=file_comment, ) if ts: diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index d33e00a..c63eb9d 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -206,6 +206,28 @@ def handle_join_sync_submission( logger.warning("Rejected join-sync: workspace or sync record not found") return + existing_join = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_id, + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + if existing_join: + _logger.info( + "join_sync_duplicate_skip", + extra={ + "sync_id": sync_id, + "channel_id": channel_id, + "workspace_id": workspace_record.id, + }, + ) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + acting_user_id = helpers.safe_get(body, "user", "id") or user_id admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) diff --git a/syncbot/helpers/slack_api.py b/syncbot/helpers/slack_api.py index 68d11d4..20d33ed 100644 --- a/syncbot/helpers/slack_api.py +++ b/syncbot/helpers/slack_api.py @@ -179,7 +179,15 @@ def get_post_records(thread_ts: str) -> list[tuple[schemas.PostMeta, schemas.Syn ) else: post_records = [] - return post_records + + seen: set[tuple[int, str]] = set() + deduped: list[tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Workspace]] = [] + for pm, sc, ws in post_records: + key = (ws.id, sc.channel_id) + if 
key not in seen: + seen.add(key) + deduped.append((pm, sc, ws)) + return deduped @slack_retry diff --git a/syncbot/helpers/workspace.py b/syncbot/helpers/workspace.py index b37dd31..f64a234 100644 --- a/syncbot/helpers/workspace.py +++ b/syncbot/helpers/workspace.py @@ -40,6 +40,17 @@ def get_sync_list(team_id: str, channel_id: str) -> list[tuple[schemas.SyncChann else: sync_channels = [] + # One logical target per (workspace, Slack channel): duplicate SyncChannel rows + # (e.g. double-submit on join/subscribe) would otherwise post the same message N times. + seen: set[tuple[int, str]] = set() + deduped: list[tuple[schemas.SyncChannel, schemas.Workspace]] = [] + for sc, ws in sync_channels: + key = (ws.id, sc.channel_id) + if key not in seen: + seen.add(key) + deduped.append((sc, ws)) + sync_channels = deduped + _cache_set(cache_key, sync_channels) return sync_channels diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py index f70c2c6..6f9747e 100644 --- a/tests/test_channel_sync_handlers.py +++ b/tests/test_channel_sync_handlers.py @@ -121,3 +121,25 @@ def test_missing_channel_selection_exits_early(self): handle_subscribe_channel_submit({}, client, logger, context) create_record.assert_not_called() + + def test_duplicate_channel_skips_join_and_create(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + sync_record = SimpleNamespace(group_id=None) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"sync_id": 55}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="Cdup"), + patch("handlers.channel_sync.DbManager.get_record", return_value=sync_record), + patch("handlers.channel_sync.DbManager.find_records", return_value=[object()]), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + 
patch("handlers.channel_sync.builders.refresh_home_tab_for_workspace") as refresh_home, + ): + handle_subscribe_channel_submit({"user": {"id": "U1"}}, client, logger, context) + + create_record.assert_not_called() + client.conversations_join.assert_not_called() + refresh_home.assert_called_once() diff --git a/tests/test_file_upload_attribution.py b/tests/test_file_upload_attribution.py new file mode 100644 index 0000000..aca9d27 --- /dev/null +++ b/tests/test_file_upload_attribution.py @@ -0,0 +1,69 @@ +"""Tests for threaded file upload ``initial_comment`` (mentions + permalink).""" + +from unittest.mock import MagicMock, patch + +from slack_sdk.web import WebClient + +from handlers.messages import _shared_by_file_initial_comment + + +class TestSharedByFileInitialComment: + def test_file_only_uses_mention_when_mapped(self): + client = MagicMock(spec=WebClient) + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value="UMAPPED"): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts=None, + ) + assert text == "Shared by <@UMAPPED>" + client.chat_getPermalink.assert_not_called() + + def test_file_only_falls_back_to_display_name(self): + client = MagicMock(spec=WebClient) + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts=None, + ) + assert text == "Shared by Nacho" + + def test_with_text_message_includes_permalink_link(self): + client = MagicMock(spec=WebClient) + client.chat_getPermalink.return_value = {"permalink": "https://example.slack.com/archives/C1/p123"} + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value="U99"): + text = 
_shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts="1234.567890", + ) + assert text == "Shared by <@U99> in " + client.chat_getPermalink.assert_called_once_with(channel="C1", message_ts="1234.567890") + + def test_permalink_failure_falls_back_to_shared_by_only(self): + client = MagicMock(spec=WebClient) + client.chat_getPermalink.side_effect = RuntimeError("api error") + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Pat", + target_client=client, + channel_id="C1", + text_message_ts="1.0", + ) + assert text == "Shared by Pat" diff --git a/tests/test_message_sync.py b/tests/test_message_sync.py new file mode 100644 index 0000000..f8d53cf --- /dev/null +++ b/tests/test_message_sync.py @@ -0,0 +1,87 @@ +"""Tests for sync list / post record deduplication and join-sync duplicate guard.""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from handlers.sync import handle_join_sync_submission +from helpers.slack_api import get_post_records +from helpers.workspace import get_sync_list +from slack import actions + + +class TestGetSyncListDeduplication: + def test_deduplicates_same_workspace_and_channel(self): + ws = SimpleNamespace(id=42, team_id="T1", workspace_name="WS") + sc_source = SimpleNamespace(id=1, sync_id=7, channel_id="Csource") + sc_dup_a = SimpleNamespace(id=2, sync_id=7, channel_id="C999") + sc_dup_b = SimpleNamespace(id=3, sync_id=7, channel_id="C999") + + with ( + patch("helpers.workspace._cache_get", return_value=None), + patch("helpers.workspace._cache_set") as cache_set, + patch("helpers.workspace.DbManager.find_records", return_value=[sc_source]), + patch( + "helpers.workspace.DbManager.find_join_records2", + 
return_value=[(sc_dup_a, ws), (sc_dup_b, ws)], + ), + ): + result = get_sync_list("T1", "Csource") + + assert len(result) == 1 + assert result[0][0] is sc_dup_a + assert result[0][1] is ws # first wins among duplicates + cache_set.assert_called_once() + + +class TestGetPostRecordsDeduplication: + def test_deduplicates_same_workspace_and_channel(self): + pm = SimpleNamespace(post_id="p1", ts=123.456789) + ws = SimpleNamespace(id=42) + sc_a = SimpleNamespace(id=10, channel_id="C777") + sc_b = SimpleNamespace(id=11, channel_id="C777") + + with ( + patch("helpers.slack_api.DbManager.find_records", return_value=[pm]), + patch( + "helpers.slack_api.DbManager.find_join_records3", + return_value=[(pm, sc_a, ws), (pm, sc_b, ws)], + ), + ): + result = get_post_records("123.456789") + + assert len(result) == 1 + assert result[0][1] is sc_a + + +class TestJoinSyncDuplicateSkip: + def test_duplicate_channel_skips_join_and_create(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10, team_id="T1") + sync_record = SimpleNamespace(id=5, title="Other") + + body = { + "user": {"id": "Uadmin"}, + "view": {"team_id": "T1", "state": {"values": {}}}, + } + form_values = { + actions.CONFIG_JOIN_SYNC_SELECT: 5, + actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: "Cdup", + } + + with ( + patch("handlers.sync.helpers.get_user_id_from_body", return_value="Uadmin"), + patch("handlers.sync.helpers.is_user_authorized", return_value=True), + patch("handlers.sync.forms.JOIN_SYNC_FORM.get_selected_values", return_value=form_values), + patch("handlers.sync.DbManager.get_record", side_effect=[workspace, sync_record]), + patch("handlers.sync.DbManager.find_records", return_value=[object()]), + patch("handlers.sync.DbManager.create_record") as create_record, + patch("handlers.sync.helpers.format_admin_label", return_value=("Admin", "Admin")), + patch("handlers.sync.builders.refresh_home_tab_for_workspace") as refresh_home, + ): + 
handle_join_sync_submission(body, client, logger, context) + + create_record.assert_not_called() + client.conversations_join.assert_not_called() + refresh_home.assert_called_once() From 00501aa9a97b017b82b9d1d0bd5b0bd7589303a3 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 22:44:06 -0500 Subject: [PATCH 34/45] Fix to syncing messages with photos. Deeper fix because bug was still occurring. --- syncbot/app.py | 14 ++++ syncbot/handlers/messages.py | 43 +++++++++++- tests/test_message_event_dedup.py | 109 ++++++++++++++++++++++++++++++ 3 files changed, 165 insertions(+), 1 deletion(-) create mode 100644 tests/test_message_event_dedup.py diff --git a/syncbot/app.py b/syncbot/app.py index d69efc3..b0d234a 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -89,6 +89,20 @@ def _redact_sensitive(obj, _depth=0): ) +@app.middleware +def _capture_slack_retry_num(req, resp, next): + """Expose ``X-Slack-Retry-Num`` on context so message handlers can drop retries.""" + headers = getattr(req, "headers", None) or {} + vals = headers.get("x-slack-retry-num") + if vals: + try: + v = vals[0] if isinstance(vals, (list, tuple)) else vals + req.context["slack_retry_num"] = int(v) + except (ValueError, TypeError, IndexError): + pass + return next() + + def handler(event: dict, context: dict) -> dict: """AWS Lambda entry point. 
diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index b893794..2c6671c 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -729,6 +729,25 @@ def _is_own_bot_message(body: dict, client: WebClient, context: dict) -> bool: return event_bot_id == own_bot_id +def _should_skip_slack_event_retry(body: dict, context: dict) -> bool: + """Return True if Slack delivered this event as a retry (duplicate work).""" + rn = context.get("slack_retry_num") + if rn is not None: + try: + if int(rn) >= 1: + return True + except (TypeError, ValueError): + pass + ra = helpers.safe_get(body, "retry_attempt") + if ra is not None: + try: + if int(ra) >= 1: + return True + except (TypeError, ValueError): + pass + return False + + def respond_to_message_event( body: dict, client: WebClient, @@ -748,11 +767,33 @@ def respond_to_message_event( if _is_own_bot_message(body, client, context): return + if _should_skip_slack_event_retry(body, context): + _logger.info( + "skipping_slack_event_retry", + extra={ + "slack_retry_num": context.get("slack_retry_num"), + "retry_attempt": helpers.safe_get(body, "retry_attempt"), + }, + ) + return + + # Slack sends a plain message event and then a file_share for the same post; process only file_share + # so we do not sync twice (and avoid downloading files twice). 
+ event_has_files = bool( + helpers.safe_get(body, "event", "files") or helpers.safe_get(body, "event", "message", "files") + ) + if not event_subtype and event_has_files: + _logger.debug( + "skip_message_pending_file_share", + extra={"channel": helpers.safe_get(body, "event", "channel")}, + ) + return + photo_list, photo_blocks, direct_files = _build_file_context(body, client, logger) has_files = bool(photo_blocks or direct_files) if ( - (not event_subtype) + (not event_subtype and not event_has_files) or event_subtype == "bot_message" or (event_subtype == "file_share" and (ctx["msg_text"] != "" or has_files)) ): diff --git a/tests/test_message_event_dedup.py b/tests/test_message_event_dedup.py new file mode 100644 index 0000000..94d5894 --- /dev/null +++ b/tests/test_message_event_dedup.py @@ -0,0 +1,109 @@ +"""Tests for message event deduplication (file_share vs plain message, Slack retries).""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.messages import ( # noqa: E402 + _should_skip_slack_event_retry, + respond_to_message_event, +) + + +class TestShouldSkipSlackEventRetry: + def test_skips_when_context_slack_retry_num_ge_1(self): + assert _should_skip_slack_event_retry({}, {"slack_retry_num": 1}) is True + + def test_no_skip_when_slack_retry_num_zero(self): + assert _should_skip_slack_event_retry({}, {"slack_retry_num": 0}) is False + + def test_skips_when_body_retry_attempt_ge_1(self): + assert _should_skip_slack_event_retry({"retry_attempt": 1}, {}) is True + + def test_no_skip_first_delivery(self): + assert _should_skip_slack_event_retry({}, {}) is False + + +class TestRespondToMessageEventDedup: + def _base_body(self): + return { + "team_id": "T001", + "event": { + 
"type": "message", + "channel": "C001", + "user": "U001", + "text": "Hello", + "ts": "1234567890.000001", + }, + } + + def test_text_only_no_subtype_still_calls_new_post(self): + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context", return_value=([], [], [])), + ): + respond_to_message_event(self._base_body(), client, logger, context) + + mock_new.assert_called_once() + + def test_no_subtype_with_files_skips_without_building_file_context(self): + body = self._base_body() + body["event"]["files"] = [{"id": "F1", "mimetype": "image/jpeg"}] + + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context") as build_fc, + ): + respond_to_message_event(body, client, logger, context) + + mock_new.assert_not_called() + build_fc.assert_not_called() + + def test_file_share_subtype_still_calls_new_post(self): + body = self._base_body() + body["event"]["subtype"] = "file_share" + body["event"]["files"] = [{"id": "F1", "mimetype": "image/jpeg"}] + + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context", return_value=([], [], [{"path": "/tmp/x", "name": "x.jpg", "mimetype": "image/jpeg"}])), + ): + respond_to_message_event(body, client, logger, context) + + mock_new.assert_called_once() + assert mock_new.call_args is not None + + def test_retry_skips_handler(self): + client = MagicMock() + logger = MagicMock() + context = {"slack_retry_num": 1} + + with ( + patch("handlers.messages._is_own_bot_message", 
return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context") as build_fc, + ): + respond_to_message_event(self._base_body(), client, logger, context) + + mock_new.assert_not_called() + build_fc.assert_not_called() From 06a4f9d0ae72e9294a0e59eff8f1d3d342d83c6e Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 23:09:54 -0500 Subject: [PATCH 35/45] Fix for split text + media reactions. --- syncbot/handlers/messages.py | 24 +++++- syncbot/helpers/slack_api.py | 2 + tests/test_message_sync.py | 22 ++++- tests/test_split_message_reactions.py | 118 ++++++++++++++++++++++++++ 4 files changed, 161 insertions(+), 5 deletions(-) create mode 100644 tests/test_split_message_reactions.py diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 2c6671c..cf2d86d 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -193,6 +193,7 @@ def _handle_new_post( post_uuid = uuid.uuid4().hex post_list: list[schemas.PostMeta] = [] + channels_synced = 0 source_workspace_id = _find_source_workspace_id(sync_records, channel_id) @@ -202,6 +203,7 @@ def _handle_new_post( for sync_channel, workspace in sync_records: try: + split_file_ts: str | None = None if sync_channel.channel_id == channel_id: ts = helpers.safe_get(body, "event", "ts") elif fed_ws and workspace.id != source_workspace_id: @@ -293,7 +295,7 @@ def _handle_new_post( channel_id=sync_channel.channel_id, text_message_ts=text_ts, ) - helpers.upload_files_to_slack( + _, split_file_ts = helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, files=direct_files, @@ -303,10 +305,16 @@ def _handle_new_post( if ts: post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) + if split_file_ts: + post_list.append( + schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(split_file_ts)) + ) + if ts or 
split_file_ts: + channels_synced += 1 except Exception as exc: _logger.error(f"Failed to sync new post to channel {sync_channel.channel_id}: {exc}") - synced = len(post_list) + synced = channels_synced failed = len(sync_records) - synced emit_metric("messages_synced", value=synced, sync_type="new_post") if failed: @@ -346,6 +354,7 @@ def _handle_thread_reply( post_uuid = uuid.uuid4().hex post_list: list[schemas.PostMeta] = [] + channels_synced = 0 source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2) @@ -357,6 +366,7 @@ def _handle_thread_reply( for post_meta, sync_channel, workspace in post_records: try: + split_file_ts: str | None = None if sync_channel.channel_id == channel_id: ts = helpers.safe_get(body, "event", "ts") elif fed_ws and workspace.id != source_workspace_id: @@ -442,7 +452,7 @@ def _handle_thread_reply( channel_id=sync_channel.channel_id, text_message_ts=text_ts, ) - helpers.upload_files_to_slack( + _, split_file_ts = helpers.upload_files_to_slack( bot_token=bot_token, channel_id=sync_channel.channel_id, files=direct_files, @@ -452,10 +462,16 @@ def _handle_thread_reply( if ts: post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) + if split_file_ts: + post_list.append( + schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(split_file_ts)) + ) + if ts or split_file_ts: + channels_synced += 1 except Exception as exc: _logger.error(f"Failed to sync thread reply to channel {sync_channel.channel_id}: {exc}") - synced = len(post_list) + synced = channels_synced failed = len(post_records) - synced emit_metric("messages_synced", value=synced, sync_type="thread_reply") if failed: diff --git a/syncbot/helpers/slack_api.py b/syncbot/helpers/slack_api.py index 20d33ed..072e5df 100644 --- a/syncbot/helpers/slack_api.py +++ b/syncbot/helpers/slack_api.py @@ -180,6 +180,8 @@ def get_post_records(thread_ts: str) -> list[tuple[schemas.PostMeta, schemas.Syn else: 
post_records = [] + post_records.sort(key=lambda row: row[0].id) + seen: set[tuple[int, str]] = set() deduped: list[tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Workspace]] = [] for pm, sc, ws in post_records: diff --git a/tests/test_message_sync.py b/tests/test_message_sync.py index f8d53cf..3f92d11 100644 --- a/tests/test_message_sync.py +++ b/tests/test_message_sync.py @@ -35,7 +35,7 @@ def test_deduplicates_same_workspace_and_channel(self): class TestGetPostRecordsDeduplication: def test_deduplicates_same_workspace_and_channel(self): - pm = SimpleNamespace(post_id="p1", ts=123.456789) + pm = SimpleNamespace(id=1, post_id="p1", ts=123.456789) ws = SimpleNamespace(id=42) sc_a = SimpleNamespace(id=10, channel_id="C777") sc_b = SimpleNamespace(id=11, channel_id="C777") @@ -52,6 +52,26 @@ def test_deduplicates_same_workspace_and_channel(self): assert len(result) == 1 assert result[0][1] is sc_a + def test_dedup_prefers_lower_post_meta_id_for_split_file_alias(self): + """Reactions on file thread replies share post_id; primary text row must win.""" + pm_file = SimpleNamespace(id=99, post_id="p1", ts=888.888) + pm_text = SimpleNamespace(id=10, post_id="p1", ts=111.111) + ws = SimpleNamespace(id=42) + sc = SimpleNamespace(id=10, channel_id="C777") + + with ( + patch("helpers.slack_api.DbManager.find_records", return_value=[pm_file]), + patch( + "helpers.slack_api.DbManager.find_join_records3", + return_value=[(pm_file, sc, ws), (pm_text, sc, ws)], + ), + ): + result = get_post_records("888.888") + + assert len(result) == 1 + assert result[0][0].id == 10 + assert result[0][0].ts == 111.111 + class TestJoinSyncDuplicateSkip: def test_duplicate_channel_skips_join_and_create(self): diff --git a/tests/test_split_message_reactions.py b/tests/test_split_message_reactions.py new file mode 100644 index 0000000..44a21ff --- /dev/null +++ b/tests/test_split_message_reactions.py @@ -0,0 +1,118 @@ +"""Tests for PostMeta rows on split text+file sync (reaction resolution).""" 
+ +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from slack_sdk.web import WebClient + +from handlers.messages import _handle_new_post, _handle_thread_reply + + +class TestSplitMessagePostMeta: + def test_new_post_text_plus_file_stores_file_ts_same_post_id(self): + logger = MagicMock() + client = MagicMock(spec=WebClient) + + sc_source = SimpleNamespace(id=1, channel_id="C_SRC", sync_id=7) + ws_source = SimpleNamespace(id=10, team_id="T1", bot_token="enc", workspace_name="A") + sc_target = SimpleNamespace(id=2, channel_id="C_TGT", sync_id=7) + ws_target = SimpleNamespace(id=20, team_id="T2", bot_token="enc", workspace_name="B") + + body = { + "event": { + "channel": "C_SRC", + "ts": "100.000000", + "team": "T1", + } + } + ctx = { + "team_id": "T1", + "channel_id": "C_SRC", + "msg_text": "hello", + "mentioned_users": [], + "user_id": "U1", + } + direct_files = [{"path": "/tmp/f.jpg", "name": "f.jpg"}] + + created: list = [] + + def capture_post_meta(rows): + created.extend(rows) + + with ( + patch("handlers.messages.helpers.get_sync_list", return_value=[(sc_source, ws_source), (sc_target, ws_target)]), + patch("handlers.messages.helpers.get_user_info", return_value=("N", "http://i")), + patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None), + patch("handlers.messages.helpers.get_federated_workspace_for_sync", return_value=None), + patch("handlers.messages.helpers.decrypt_bot_token", return_value="xoxb-test"), + patch("handlers.messages.helpers.apply_mentioned_users", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.resolve_channel_references", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.get_workspace_by_id", return_value=None), + patch( + "handlers.messages.helpers.get_display_name_and_icon_for_synced_message", + return_value=("N", None), + ), + patch("handlers.messages.helpers.post_message", return_value={"ts": "200.000000"}), + 
patch("handlers.messages.helpers.upload_files_to_slack", return_value=(None, "300.000000")), + patch("handlers.messages.helpers.cleanup_temp_files"), + patch("handlers.messages.DbManager.create_records", side_effect=capture_post_meta), + ): + _handle_new_post(body, client, logger, ctx, [], [], direct_files) + + assert len(created) == 3 + assert {m.sync_channel_id for m in created} == {1, 2} + target_rows = [m for m in created if m.sync_channel_id == 2] + assert len(target_rows) == 2 + assert target_rows[0].post_id == target_rows[1].post_id + assert {target_rows[0].ts, target_rows[1].ts} == {200.0, 300.0} + + def test_thread_reply_text_plus_file_stores_file_ts_same_post_id(self): + logger = MagicMock() + client = MagicMock(spec=WebClient) + + pm_src = SimpleNamespace(id=1, post_id="parent", ts=10.0) + pm_tgt = SimpleNamespace(id=2, post_id="parent", ts=20.0) + sc_source = SimpleNamespace(id=11, channel_id="C_SRC", sync_id=7) + ws_source = SimpleNamespace(id=10, workspace_name="A", bot_token="enc") + sc_target = SimpleNamespace(id=22, channel_id="C_TGT", sync_id=7) + ws_target = SimpleNamespace(id=20, workspace_name="B", bot_token="enc") + + post_records = [(pm_src, sc_source, ws_source), (pm_tgt, sc_target, ws_target)] + + body = {"event": {"channel": "C_SRC", "ts": "150.000000"}} + ctx = { + "channel_id": "C_SRC", + "msg_text": "reply", + "mentioned_users": [], + "user_id": "U1", + "thread_ts": "10.000000", + } + direct_files = [{"path": "/tmp/f.jpg", "name": "f.jpg"}] + + created: list = [] + + with ( + patch("handlers.messages.helpers.get_post_records", return_value=post_records), + patch("handlers.messages.helpers.get_user_info", return_value=("N", "http://i")), + patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None), + patch("handlers.messages.helpers.get_federated_workspace_for_sync", return_value=None), + patch("handlers.messages.helpers.decrypt_bot_token", return_value="xoxb-test"), + 
patch("handlers.messages.helpers.apply_mentioned_users", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.resolve_channel_references", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.get_workspace_by_id", return_value=None), + patch( + "handlers.messages.helpers.get_display_name_and_icon_for_synced_message", + return_value=("N", None), + ), + patch("handlers.messages.helpers.post_message", return_value={"ts": "250.000000"}), + patch("handlers.messages.helpers.upload_files_to_slack", return_value=(None, "350.000000")), + patch("handlers.messages.helpers.cleanup_temp_files"), + patch("handlers.messages.DbManager.create_records", side_effect=lambda rows: created.extend(rows)), + ): + _handle_thread_reply(body, client, logger, ctx, [], direct_files) + + assert len(created) == 3 + target_rows = [m for m in created if m.sync_channel_id == 22] + assert len(target_rows) == 2 + assert target_rows[0].post_id == target_rows[1].post_id + assert {target_rows[0].ts, target_rows[1].ts} == {250.0, 350.0} From 4a29f0c9b1a0ea8f8d9dc0d05f7ed7e6b25b9b70 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 23:16:15 -0500 Subject: [PATCH 36/45] Added summary of how media is synced. --- docs/USER_GUIDE.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index d6f595f..56dadb6 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -51,6 +51,15 @@ The Home tab and User Mapping screens have Refresh buttons. To keep API usage lo Images and videos are downloaded from the source and uploaded directly to each target channel. GIFs from the Slack GIF picker or GIPHY are synced as image blocks. 
+| Source message | What appears in target workspace | +|---|---| +| Text only | Single message with text, shown under the original poster's name and avatar | +| GIF (Slack picker / GIPHY) | Single message with the GIF embedded inline via image block, under the poster's name | +| GIF + text | Single message with text and GIF together, under the poster's name | +| Photo or video only (no text) | Single file upload with `Shared by @User` (tagged if mapped, plain name otherwise) | +| Text + photo or video | Text message under the poster's name, then the file in a thread reply with `Shared by @User in this message` linking back to the text | +| Multiple files | Same as above; all files are uploaded together in a single thread reply | + ## External Connections *(Opt-in — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable)* From eb53ded5fc13f764bfcb5f787445cae07e1ad94e Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 23:16:27 -0500 Subject: [PATCH 37/45] Change to description. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9c9d473..83ff438 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,7 @@ [tool.poetry] name = "syncbot" version = "1.0.0" -description = "Slack app for syncing messages and threads across workspaces." +description = "Sync chat threads between Slack Workspaces." authors = ["Evan Petzoldt ", "Klint Van Tassel "] readme = "README.md" From 0f0dc0c892646282831f9cdbe5abc92d2eda56eb Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Wed, 25 Mar 2026 23:23:50 -0500 Subject: [PATCH 38/45] Poetry update. 
--- poetry.lock | 118 +++++++++++++++++++-------------------- syncbot/requirements.txt | 2 +- 2 files changed, 60 insertions(+), 60 deletions(-) diff --git a/poetry.lock b/poetry.lock index eb0573a..7ea3e3b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] name = "alembic" @@ -22,18 +22,18 @@ tz = ["tzdata"] [[package]] name = "boto3" -version = "1.42.75" +version = "1.42.76" description = "The AWS SDK for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "boto3-1.42.75-py3-none-any.whl", hash = "sha256:16bc657d16403ee8e11c8b6920c245629e37a36ea60352b919da566f82b4cb4c"}, - {file = "boto3-1.42.75.tar.gz", hash = "sha256:3c7fd95a50c69271bd7707b7eda07dcfddb30e961a392613010f7ee81d91acb3"}, + {file = "boto3-1.42.76-py3-none-any.whl", hash = "sha256:63c6779c814847016b89ae1b72ed968f8a63d80e589ba337511aa6fc1b59585e"}, + {file = "boto3-1.42.76.tar.gz", hash = "sha256:aa2b1973eee8973a9475d24bb579b1dee7176595338d4e4f7880b5c6189b8814"}, ] [package.dependencies] -botocore = ">=1.42.75,<1.43.0" +botocore = ">=1.42.76,<1.43.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.16.0,<0.17.0" @@ -42,14 +42,14 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.42.75" +version = "1.42.76" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "botocore-1.42.75-py3-none-any.whl", hash = "sha256:915e43b7ac8f50cf3dbc937ba713de5acb999ea48ad8fecd1589d92ad415f787"}, - {file = "botocore-1.42.75.tar.gz", hash = "sha256:95c8e716b6be903ee1601531caa4f50217400aa877c18fe9a2c3047d2945d477"}, + {file = "botocore-1.42.76-py3-none-any.whl", hash = "sha256:151e714ae3c32f68ea0b4dc60751401e03f84a87c6cf864ea0ee64aa10eb4607"}, + {file = "botocore-1.42.76.tar.gz", hash = "sha256:c553fa0ae29e36a5c407f74da78b78404b81b74b15fb62bf640a3cd9385f0874"}, ] [package.dependencies] @@ -324,61 +324,61 @@ files = [ [[package]] name = "cryptography" -version = "46.0.5" +version = "46.0.6" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.8" groups = ["main"] files = [ - {file = "cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = 
"sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0"}, - {file = "cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731"}, - {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82"}, - {file = "cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1"}, - {file = "cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48"}, - {file = "cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4"}, - {file = "cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981"}, - {file = 
"cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0"}, - {file = "cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663"}, - {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826"}, - {file = "cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d"}, - {file = "cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a"}, - {file = "cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4"}, - {file = "cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d"}, - {file = "cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c"}, - {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4"}, - {file = "cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9"}, - {file = "cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72"}, - {file = "cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257"}, - {file = "cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7"}, - {file = "cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d"}, + {file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"}, + {file = 
"cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"}, + {file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"}, + {file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"}, + {file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"}, + {file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"}, + {file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = 
"sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"}, + {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"}, + {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"}, + {file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"}, + {file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"}, + {file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"}, + {file = 
"cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"}, + {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"}, + {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"}, + {file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"}, + {file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"}, + {file = 
"cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"}, + {file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"}, ] [package.dependencies] @@ -391,7 +391,7 @@ nox = ["nox[uv] (>=2024.4.15)"] pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==46.0.5)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index 95d10f9..8d1ceb2 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -2,7 +2,7 @@ alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" certifi==2026.2.25 ; python_version >= "3.12" and python_version < "4.0" cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" charset-normalizer==3.4.6 ; python_version >= "3.12" and python_version < "4.0" -cryptography==46.0.5 ; python_version >= "3.12" and python_version < "4.0" +cryptography==46.0.6 ; python_version >= "3.12" and python_version < "4.0" greenlet==3.3.2 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == 
"WIN32") idna==3.11 ; python_version >= "3.12" and python_version < "4.0" mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0" From 0c14a0f709b2e81c4e1d27e6fdc7328c1516fc2c Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 07:36:40 -0500 Subject: [PATCH 39/45] Update to docs. Fix for custom emojis. --- CONTRIBUTING.md | 13 +- README.md | 107 +++----------- docs/DEPLOYMENT.md | 4 +- docs/DEVELOPMENT.md | 74 ++++++++++ docs/USER_GUIDE.md | 2 +- syncbot/federation/api.py | 35 ++++- syncbot/federation/core.py | 4 + syncbot/handlers/messages.py | 3 + tests/test_federation_reactions.py | 218 +++++++++++++++++++++++++++++ 9 files changed, 364 insertions(+), 96 deletions(-) create mode 100644 docs/DEVELOPMENT.md create mode 100644 tests/test_federation_reactions.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 04846d1..426d16e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,6 +2,17 @@ Thanks for helping improve SyncBot. +## Branching (upstream vs downstream) + +The **upstream** repository ([F3Nation-Community/syncbot](https://github.com/F3Nation-Community/syncbot)) is the shared codebase. Each deployment maintains its own **fork**: + +| Branch | Role | +|--------|------| +| **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | +| **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)). | + +Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. + ## Workflow 1. **Fork** the repository and create a branch from **`main`**. @@ -12,7 +23,7 @@ Thanks for helping improve SyncBot. - Run **`pre-commit run --all-files`** (install with `pip install pre-commit && pre-commit install` if needed). 
- Ensure **CI passes**: requirements export check, SAM template lint, and tests (see [.github/workflows/ci.yml](.github/workflows/ci.yml)). -- If you change dependencies in `pyproject.toml`, refresh the lockfile and `syncbot/requirements.txt` as described in the README. +- If you change dependencies in `pyproject.toml`, refresh the lockfile and `syncbot/requirements.txt` as described in [docs/DEVELOPMENT.md](docs/DEVELOPMENT.md). ## Questions diff --git a/README.md b/README.md index 97f1afb..c3b640e 100644 --- a/README.md +++ b/README.md @@ -1,109 +1,49 @@ # SyncBot SyncBot Icon -SyncBot is a Slack app for replicating messages and replies across workspaces on the free tier. Once configured, messages, threads, edits, deletes, reactions, images, videos, and GIFs mirror to every channel in a Sync group. +SyncBot is a Slack app for syncing messages across workspaces. Once configured, this app will sync messages, threads, edits, deletes, reactions, images, videos, and GIFs to every channel in a SyncBot group. > **Using SyncBot in Slack?** See the [User Guide](docs/USER_GUIDE.md). --- -## Branching (upstream vs downstream) +## Slack app setup -This repo is the **canonical** project. **Forks** (downstream installations) should use Git like this: +Do this before you deploy or run locally: -| Branch | Role | -|--------|------| -| **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | -| **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [DEPLOYMENT.md](docs/DEPLOYMENT.md)). | - -Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. +1. [api.slack.com/apps](https://api.slack.com/apps) → **Create New App** → **From an app manifest** → paste [`slack-manifest.json`](slack-manifest.json). +2. 
Upload [`assets/icon.png`](assets/icon.png) under **Basic Information** → **Display Information**. +3. Copy **Signing Secret**, **Client ID**, and **Client Secret** (needed for deploy). For **local dev**, install the app under **OAuth & Permissions** and copy the **Bot User OAuth Token** (`xoxb-...`). --- -## Deploy (AWS or GCP) +## Deploy -You can deploy in two ways: - -1. **Download or clone and run the deploy script** — No GitHub Actions required. From the **repository root**, run `./deploy.sh` (or `.\deploy.ps1` on Windows). The script walks you through provider choice, cloud auth, and optional GitHub variable setup. -2. **Fork the repo and use CI/CD** — Configure repository variables and secrets (see [DEPLOYMENT.md](docs/DEPLOYMENT.md)), then push to **`test`** or **`prod`** on your fork to trigger automated deploys. - -From the **repository root**, the infra-agnostic launcher is: +From the **repo root**, run the deploy script once for **`test`** and once for **`prod`** to automatically deploy to your infrastructure provider (currently AWS and GCP are supported). | OS | Command | |----|---------| | macOS / Linux | `./deploy.sh` | | Windows (PowerShell) | `.\deploy.ps1` | -The launcher lists providers under `infra//scripts/deploy.sh` (e.g. **aws**, **gcp**), prompts for a choice, and runs that script. Shortcuts: `./deploy.sh aws`, `./deploy.sh gcp`, `./deploy.sh 1`. On **Windows**, `deploy.ps1` checks for **Git Bash** or **WSL** bash, then runs the same `deploy.sh` paths (provider prerequisites are enforced inside those bash scripts). - -If **Poetry** is on your `PATH`, the root launcher first runs `poetry update` and regenerates `syncbot/requirements.txt` from `poetry.lock` so deploys match the pinned Python deps (Poetry 2.x: install the export plugin once with `poetry self add poetry-plugin-export`). If Poetry is missing, the launcher skips this step and continues. - -**GCP CI:** Interactive deploy via `./deploy.sh` → **gcp** is supported. 
The **GitHub Actions** workflow for GCP (`.github/workflows/deploy-gcp.yml`) is a stub until Workload Identity Federation and image build/push steps are wired — use the guided script for GCP until then. +You can also fork the repo, set GitHub variables/secrets, and push to **`test`** or **`prod`** to trigger CI — see [DEPLOYMENT.md](docs/DEPLOYMENT.md). -### What to install first +### Prerequisites -| Tool | Why | -|------|-----| -| **Git** | Clone the repo; on Windows, **Git for Windows** supplies **Git Bash**, which the deploy scripts use. | -| **Bash** | Required for `./deploy.sh` and `infra/*/scripts/deploy.sh`. On Windows use Git Bash or **WSL** (then run `./deploy.sh` from Linux). | +In order for the deploy script to work, you need **Git** and **Bash** (on Windows, use **Git for Windows** / **Git Bash** or **WSL**). -**AWS** (`infra/aws/scripts/deploy.sh`): **AWS CLI v2**, **AWS SAM CLI**, **Docker** (for `sam build --use-container`), **Python 3** (`python3`), **`curl`** (Slack manifest API). **Optional:** **`gh`** (GitHub Actions setup); if `gh` is missing, the script shows install hints and asks whether to continue. +**AWS:** AWS CLI v2, SAM CLI, Docker (for `sam build --use-container`), Python 3, and `curl`. Optional: `gh` for GitHub Actions setup. -**GCP** (`infra/gcp/scripts/deploy.sh`): **Terraform**, **Google Cloud SDK (`gcloud`)**, **Python 3**, **`curl`**. **Optional:** **`gh`** — same behavior as AWS. +**GCP:** Terraform, `gcloud`, Python 3, and `curl`. Optional: `gh`. -Full behavior, manual `sam` / Terraform steps, GitHub variables, and troubleshooting: **[docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)**. - ---- - -## Slack app (before deploy or local dev) - -1. [api.slack.com/apps](https://api.slack.com/apps) → **Create New App** → **From an app manifest** → paste [`slack-manifest.json`](slack-manifest.json). -2. Upload [`assets/icon.png`](assets/icon.png) under **Basic Information** → **Display Information**. -3. 
Copy **Signing Secret**, **Client ID**, and **Client Secret** (needed for deploy). For **local dev**, install the app under **OAuth & Permissions** and copy the **Bot User OAuth Token** (`xoxb-...`). +Full prerequisite checks, manual `sam` / Terraform, Slack URLs after deploy, and CI variables: **[docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)**. -After deployment, point Event Subscriptions and Interactivity at your real HTTPS URL (the deploy script can generate a stage-specific `slack-manifest_.json` and optional Slack API updates). Details: [DEPLOYMENT.md](docs/DEPLOYMENT.md). --- ## Local development -### Dev Container (recommended) - -**Needs:** [Docker Desktop](https://www.docker.com/products/docker-desktop/) (or Docker Engine on Linux) + [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) in VS Code. - -1. `cp .env.example .env` and set `SLACK_BOT_TOKEN` (`xoxb-...`). -2. **Dev Containers: Reopen in Container** — Python, MySQL, and deps run inside the container. -3. `cd syncbot && python app.py` → app on **port 3000** (forwarded). -4. Expose to Slack with **cloudflared** or **ngrok** from the host; set Slack **Event Subscriptions** / **Interactivity** URLs to the public URL. - -Optional **SQLite**: in `.env` set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:////app/syncbot/syncbot.db`. - -### Docker Compose (no Dev Container) - -```bash -cp .env.example .env # set SLACK_BOT_TOKEN -docker compose up --build -``` - -App on port **3000**; restart the `app` service after code changes. - -### Native Python - -**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See `.env.example` and [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md). 
- -After `poetry add` / `poetry update`, regenerate the pinned file used by the Docker image and `pip-audit` in CI so it matches `poetry.lock`: - -```bash -poetry self add poetry-plugin-export # Poetry 2.x; once per Poetry install -poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt -``` - ---- - -## Configuration reference - -- **[`.env.example`](.env.example)** — local env vars with comments. -- **[docs/INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md)** — runtime contract for any cloud (DB, Slack, OAuth, production vs local). +See **[docs/DEVELOPMENT.md](docs/DEVELOPMENT.md)** for Dev Container, Docker Compose, native Python, project layout, and refreshing `syncbot/requirements.txt` after dependency changes. --- @@ -113,6 +53,7 @@ poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt |-----|----------| | [USER_GUIDE.md](docs/USER_GUIDE.md) | End-user features (Home tab, syncs, groups) | | [DEPLOYMENT.md](docs/DEPLOYMENT.md) | Guided + manual AWS/GCP deploy, CI, GitHub | +| [DEVELOPMENT.md](docs/DEVELOPMENT.md) | Local dev, branching for forks, dependencies | | [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md) | Environment variables and platform expectations | | [ARCHITECTURE.md](docs/ARCHITECTURE.md) | Sync flow, AWS reference architecture | | [BACKUP_AND_MIGRATION.md](docs/BACKUP_AND_MIGRATION.md) | Backup/restore and federation migration | @@ -120,22 +61,6 @@ poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt | [CHANGELOG.md](CHANGELOG.md) | Release history | | [CONTRIBUTING.md](CONTRIBUTING.md) | How to contribute | -### Project layout - -``` -syncbot/ -├── syncbot/ # App (app.py); slack_manifest_scopes.py = bot/user OAuth scope lists (manifest + SLACK_BOT_SCOPES / SLACK_USER_SCOPES) -├── syncbot/db/alembic/ # Migrations (bundled with app for Lambda) -├── tests/ -├── docs/ -├── infra/aws/ # SAM, bootstrap stack -├── infra/gcp/ # Terraform -├── deploy.sh # Root launcher (macOS / Linux / Git 
Bash) -├── deploy.ps1 # Windows launcher → Git Bash or WSL → infra/.../deploy.sh -├── slack-manifest.json -└── docker-compose.yml -``` - ## License **AGPL-3.0** — see [LICENSE](LICENSE). diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md index 225b13c..cf02d2e 100644 --- a/docs/DEPLOYMENT.md +++ b/docs/DEPLOYMENT.md @@ -21,7 +21,7 @@ The launcher discovers `infra//scripts/deploy.sh`, shows a numbered me **Windows:** `deploy.ps1` requires **Git Bash** or **WSL** with bash, then runs the same `infra/.../deploy.sh` as macOS/Linux. Alternatively install [Git for Windows](https://git-scm.com/download/win) or [WSL](https://learn.microsoft.com/windows/wsl/install) and run `./deploy.sh` from Git Bash or a WSL shell. -**Prerequisites** (also summarized in the root [README](../README.md)): +**Prerequisites** (short list in the root [README](../README.md); full detail below): - **AWS path:** AWS CLI v2, SAM CLI, Docker (`sam build --use-container`), Python 3 (`python3`), **`curl`** (Slack manifest API). **Optional:** `gh` (GitHub Actions setup). The script prints a CLI status line per tool (✓ / !) and Slack doc links; if `gh` is missing, it asks whether to continue. - **GCP path:** Terraform, `gcloud`, Python 3, **`curl`**. **Optional:** `gh` — same behavior as AWS. @@ -68,7 +68,7 @@ See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and ou ## Fork-First model (recommended for forks) -**Branch roles** (see also the root [README](../README.md) **Branching** section): use **`main`** to track upstream and merge contributions; on your fork, use **`test`** and **`prod`** for automated deploys (CI runs on push to those branches). +**Branch roles** (see [CONTRIBUTING.md](../CONTRIBUTING.md)): use **`main`** to track upstream and merge contributions; on your fork, use **`test`** and **`prod`** for automated deploys (CI runs on push to those branches). 1. Keep `syncbot/` provider-neutral; use only env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md). 2. 
Put provider code in `infra//` and `.github/workflows/deploy-.yml`. diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md new file mode 100644 index 0000000..b2affa4 --- /dev/null +++ b/docs/DEVELOPMENT.md @@ -0,0 +1,74 @@ +# Development Guide + +How to run SyncBot locally (Dev Container, Docker Compose, native Python) and manage dependencies. For **cloud deploy** and CI/CD, see [DEPLOYMENT.md](DEPLOYMENT.md). For runtime env vars in any environment, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). + +## Branching (upstream vs downstream) + +The **upstream** repository ([F3Nation-Community/syncbot](https://github.com/F3Nation-Community/syncbot)) is the shared codebase. Each deployment maintains its own **fork**: + +| Branch | Role | +|--------|------| +| **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | +| **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [DEPLOYMENT.md](DEPLOYMENT.md)). | + +Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. + +## Local development + +### Dev Container (recommended) + +**Needs:** [Docker Desktop](https://www.docker.com/products/docker-desktop/) (or Docker Engine on Linux) + [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) in VS Code. + +1. `cp .env.example .env` and set `SLACK_BOT_TOKEN` (`xoxb-...`). +2. **Dev Containers: Reopen in Container** — Python, MySQL, and deps run inside the container. +3. `cd syncbot && python app.py` → app on **port 3000** (forwarded). +4. Expose to Slack with **cloudflared** or **ngrok** from the host; set Slack **Event Subscriptions** / **Interactivity** URLs to the public URL. 
+ +Optional **SQLite**: in `.env` set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:////app/syncbot/syncbot.db`. + +### Docker Compose (no Dev Container) + +```bash +cp .env.example .env # set SLACK_BOT_TOKEN +docker compose up --build +``` + +App on port **3000**; restart the `app` service after code changes. + +### Native Python + +**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). + +## Configuration reference + +- **[`.env.example`](../.env.example)** — local env vars with comments. +- **[INFRA_CONTRACT.md](INFRA_CONTRACT.md)** — runtime contract for any cloud (DB, Slack, OAuth, production vs local). + +## Project layout + +``` +syncbot/ +├── syncbot/ # App (app.py); slack_manifest_scopes.py = bot/user OAuth scope lists (manifest + SLACK_BOT_SCOPES / SLACK_USER_SCOPES) +├── syncbot/db/alembic/ # Migrations (bundled with app for Lambda) +├── tests/ +├── docs/ +├── infra/aws/ # SAM, bootstrap stack +├── infra/gcp/ # Terraform +├── deploy.sh # Root launcher (macOS / Linux / Git Bash) +├── deploy.ps1 # Windows launcher → Git Bash or WSL → infra/.../deploy.sh +├── slack-manifest.json +└── docker-compose.yml +``` + +## Dependency management + +After `poetry add` / `poetry update`, regenerate the pinned file used by the Docker image and **`pip-audit`** in CI so it matches `poetry.lock`: + +```bash +poetry self add poetry-plugin-export # Poetry 2.x; once per Poetry install +poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt +``` + +The root **`./deploy.sh`** may run `poetry update` and regenerate `syncbot/requirements.txt` when Poetry is on your `PATH` (see [DEPLOYMENT.md](DEPLOYMENT.md)). + +CI runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt` (see [.github/workflows/ci.yml](../.github/workflows/ci.yml)). 
diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 56dadb6..71077da 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -7,7 +7,7 @@ This guide is for **workspace admins and end users** configuring SyncBot in Slac 1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) 2. Open the **SyncBot** app from the sidebar and click the **Home** tab (requires workspace admin or owner) 3. The Home tab shows everything in one view: - - **SyncBot Configuration (top row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) + - **SyncBot Configuration (bottom row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) - **Workspace Groups** — create or join groups of workspaces that can sync channels together - **Per-group sections** — for each group you can publish channels, manage user mapping (dedicated Home tab screen), and see/manage channel syncs inline - **Synced Channels** — each row shows the local channel and workspace list in brackets (e.g. 
_[Any: Your Workspace, Other Workspace]_), with pause/resume and stop controls, synced-since date, and tracked message count diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py index 8ab406e..738a714 100644 --- a/syncbot/federation/api.py +++ b/syncbot/federation/api.py @@ -24,6 +24,7 @@ import re from datetime import UTC, datetime +from slack_sdk.errors import SlackApiError from slack_sdk.web import WebClient import constants @@ -490,6 +491,9 @@ def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> tupl channel_id = body["channel_id"] reaction = body["reaction"] action = body.get("action", "add") + user_name = body.get("user_name") or "Remote User" + user_avatar_url = body.get("user_avatar_url") + workspace_name = body.get("workspace_name") or "Remote" resolved = _resolve_channel_for_federated(channel_id, fed_ws) if not resolved: @@ -499,7 +503,8 @@ def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> tupl post_records = _find_post_records(post_id, sync_channel.id) applied = 0 - ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + bot_token = helpers.decrypt_bot_token(workspace.bot_token) + ws_client = WebClient(token=bot_token) for post_meta in post_records: try: if action == "add": @@ -507,6 +512,34 @@ def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> tupl else: ws_client.reactions_remove(channel=channel_id, timestamp=str(post_meta.ts), name=reaction) applied += 1 + except SlackApiError as exc: + error_code = "" + if exc.response: + if isinstance(exc.response, dict): + error_code = str(exc.response.get("error") or "") + else: + error_code = str(getattr(exc.response, "get", lambda _k, _d=None: "")("error", "")) + + if action == "add" and error_code == "invalid_name": + try: + helpers.post_message( + bot_token=bot_token, + channel_id=channel_id, + msg_text=f"reacted with :{reaction}:", + user_name=user_name, + user_profile_url=user_avatar_url, + 
workspace_name=workspace_name, + thread_ts=str(post_meta.ts), + ) + applied += 1 + continue + except Exception: + _logger.warning( + "federation_react_fallback_failed", + extra={"channel_id": channel_id, "ts": str(post_meta.ts)}, + ) + + _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) except Exception: _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) diff --git a/syncbot/federation/core.py b/syncbot/federation/core.py index 4d4702d..99800f4 100644 --- a/syncbot/federation/core.py +++ b/syncbot/federation/core.py @@ -662,6 +662,8 @@ def build_reaction_payload( reaction: str, action: str, user_name: str, + user_avatar_url: str | None = None, + workspace_name: str | None = None, timestamp: str, ) -> dict: """Build a federation reaction payload.""" @@ -672,5 +674,7 @@ def build_reaction_payload( "reaction": reaction, "action": action, "user_name": user_name, + "user_avatar_url": user_avatar_url, + "workspace_name": workspace_name, "timestamp": timestamp, } diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index cf2d86d..9cae837 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -662,6 +662,9 @@ def _handle_reaction( channel_id=sync_channel.channel_id, reaction=reaction, action="add", + user_name=user_name or user_id or "Someone", + user_avatar_url=user_profile_url, + workspace_name=ws_name, timestamp=f"{post_meta.ts:.6f}", ) federation.push_reaction(fed_ws, payload) diff --git a/tests/test_federation_reactions.py b/tests/test_federation_reactions.py new file mode 100644 index 0000000..ea6bfb4 --- /dev/null +++ b/tests/test_federation_reactions.py @@ -0,0 +1,218 @@ +"""Tests for federated reaction payload and fallback behavior.""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from slack_sdk.errors import SlackApiError + +from federation import api as federation_api +from 
federation import core as federation_core + + +class TestFederationReactionPayload: + def test_build_reaction_payload_includes_user_fields(self): + payload = federation_core.build_reaction_payload( + post_id="post-1", + channel_id="C123", + reaction="custom_emoji", + action="add", + user_name="Alice", + user_avatar_url="https://avatar.example/alice.png", + workspace_name="Workspace A", + timestamp="100.000001", + ) + + assert payload["post_id"] == "post-1" + assert payload["channel_id"] == "C123" + assert payload["reaction"] == "custom_emoji" + assert payload["action"] == "add" + assert payload["user_name"] == "Alice" + assert payload["user_avatar_url"] == "https://avatar.example/alice.png" + assert payload["workspace_name"] == "Workspace A" + assert payload["timestamp"] == "100.000001" + + +class TestFederationReactionFallback: + def test_invalid_name_reaction_falls_back_to_thread_text(self): + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "add", + "user_name": "Alice", + "user_avatar_url": "https://avatar.example/alice.png", + "workspace_name": "Workspace A", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message", 
return_value={"ts": "200.000001"}) as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["ok"] is True + assert resp["applied"] == 1 + ws_client.reactions_add.assert_called_once_with(channel="C123", timestamp="123.456", name="missing_custom") + post_message_mock.assert_called_once_with( + bot_token="xoxb-test", + channel_id="C123", + msg_text="reacted with :missing_custom:", + user_name="Alice", + user_profile_url="https://avatar.example/alice.png", + workspace_name="Workspace A", + thread_ts="123.456", + ) + + def test_non_invalid_name_error_does_not_fallback(self): + """Other Slack errors (rate limit, network, etc.) should NOT trigger the text fallback.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "thumbsup", + "action": "add", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "too_many_reactions" + slack_exc = SlackApiError(message="too many reactions", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 0 + post_message_mock.assert_not_called() + + def test_successful_reaction_add_no_fallback(self): + 
"""When reactions_add succeeds, no text fallback should be posted.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "thumbsup", + "action": "add", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + ws_client = MagicMock() + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 1 + ws_client.reactions_add.assert_called_once() + post_message_mock.assert_not_called() + + def test_reaction_remove_invalid_name_no_fallback(self): + """Removing a non-existent emoji should not post a text fallback.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "remove", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_remove.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", 
return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 0 + post_message_mock.assert_not_called() + + def test_missing_user_fields_use_defaults(self): + """When user_name/workspace_name are absent from payload, defaults are used.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "add", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message", return_value={"ts": "200.000001"}) as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 1 + post_message_mock.assert_called_once_with( + bot_token="xoxb-test", + channel_id="C123", + msg_text="reacted with :missing_custom:", + user_name="Remote User", + user_profile_url=None, + workspace_name="Remote", + thread_ts="123.456", + ) From 
8121dda48847bdfe021cf14749f4f94681c4163f Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 08:13:17 -0500 Subject: [PATCH 40/45] Fix CI checkout for fork PRs. --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bdab18e..9824f5a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,6 +19,7 @@ jobs: - uses: actions/checkout@v4 with: ref: ${{ github.head_ref || github.ref_name }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} fetch-depth: 0 - uses: actions/setup-python@v5 with: From 7afa27c86ffea799f7c4fcfecc7b2e5b43ff6cae Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 20:01:00 -0500 Subject: [PATCH 41/45] Security fixes to backup/restore functionality. Added PRIMARY_WORKSPACE env var to restrict full backup/restore to that workspace. Changed ENABLE_DB_RESET to boolean. --- .env.example | 9 +++-- .github/workflows/deploy-aws.yml | 2 + CHANGELOG.md | 10 +++++ deploy.sh | 13 +++---- docs/BACKUP_AND_MIGRATION.md | 6 +++ docs/INFRA_CONTRACT.md | 5 ++- infra/aws/scripts/deploy.sh | 17 ++++++-- infra/aws/template.yaml | 20 ++++++++-- infra/gcp/main.tf | 1 + infra/gcp/scripts/deploy.sh | 17 ++++++-- infra/gcp/variables.tf | 8 +++- syncbot/builders/home.py | 13 ++++--- syncbot/constants.py | 5 ++- syncbot/db/__init__.py | 2 +- syncbot/handlers/export_import.py | 19 +++++++++ syncbot/handlers/sync.py | 12 ++++-- syncbot/helpers/__init__.py | 2 + syncbot/helpers/core.py | 38 ++++++++++++------ syncbot/slack/actions.py | 2 +- tests/test_export_import_handlers.py | 32 +++++++++++++-- tests/test_primary_workspace_gates.py | 56 +++++++++++++++++++++++++++ 21 files changed, 238 insertions(+), 51 deletions(-) create mode 100644 tests/test_primary_workspace_gates.py diff --git a/.env.example b/.env.example index d82597f..3b3fd9b 100644 --- a/.env.example +++ b/.env.example @@ -33,9 +33,12 @@ 
DATABASE_SCHEMA=syncbot # DATABASE_BACKEND=sqlite # DATABASE_URL=sqlite:///syncbot.db -# When set to a Slack Team ID, the "Reset Database" button will be available -# on the Home tab for that team. Clicking it drops and reinitializes the DB. -# ENABLE_DB_RESET= +# Slack Team ID of the primary workspace. When set, full backup/restore and DB +# reset (when enabled below) are only available from this workspace. +# PRIMARY_WORKSPACE=T0123456789 + +# When true (and PRIMARY_WORKSPACE matches), show "Reset Database" on the Home tab. +# ENABLE_DB_RESET=true # ----------------------------------------------------------------------------- # Local Development Mode diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml index f49e019..fab46d5 100644 --- a/.github/workflows/deploy-aws.yml +++ b/.github/workflows/deploy-aws.yml @@ -110,6 +110,7 @@ jobs: SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + PrimaryWorkspace=${{ vars.PRIMARY_WORKSPACE }} \ EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ @@ -171,6 +172,7 @@ jobs: SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + PrimaryWorkspace=${{ vars.PRIMARY_WORKSPACE }} \ EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ diff --git a/CHANGELOG.md b/CHANGELOG.md index a2f4453..b763942 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,16 @@ All notable changes to this project are documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Changed + +- `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID. Reset Database requires both `PRIMARY_WORKSPACE` to match the current workspace and `ENABLE_DB_RESET` to be truthy. + +### Added + +- `PRIMARY_WORKSPACE` env var: when set to a Slack Team ID, restricts full backup/restore and DB reset to that workspace only. + ## [1.0.0] - 2026-03-25 ### Added diff --git a/deploy.sh b/deploy.sh index cc0e106..e1e87f4 100755 --- a/deploy.sh +++ b/deploy.sh @@ -272,22 +272,21 @@ prompt_soft_delete_retention_days() { done } -prompt_enable_db_reset() { +prompt_primary_workspace() { local default="$1" - echo "WARNING: When set to a Slack Team ID, a \"Reset Database\" button appears on the Home tab." >&2 - echo "Clicking it DROPS and reinitializes the entire database -- all data is permanently destroyed." >&2 - echo "Leave empty (or enter none/disabled) to turn this off." >&2 + echo "Optional Slack Team ID for PRIMARY_WORKSPACE (scopes backup/restore and DB reset to one workspace)." >&2 + echo "Leave empty so backup/restore is available from every workspace." 
>&2 local disp if [[ -z "$default" ]]; then - disp="(disabled)" + disp="(any workspace)" else disp="$default" fi local v - read -r -p "ENABLE_DB_RESET (Slack Team ID) [$disp]: " v + read -r -p "PRIMARY_WORKSPACE (Slack Team ID) [$disp]: " v v="${v:-$default}" case "$(echo "$v" | tr "[:upper:]" "[:lower:]")" in - "" | none | disabled) echo "" ;; + "" | none) echo "" ;; *) echo "$v" ;; esac } diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md index 982cd58..a65cd5d 100644 --- a/docs/BACKUP_AND_MIGRATION.md +++ b/docs/BACKUP_AND_MIGRATION.md @@ -2,6 +2,8 @@ ## Full-Instance Backup and Restore +When **`PRIMARY_WORKSPACE`** is set to a Slack Team ID, the **Backup/Restore** button is only shown in that workspace. This prevents admins in other workspaces from downloading a full-instance backup. When **`PRIMARY_WORKSPACE`** is unset, backup/restore is available from every workspace (legacy behavior). + Use **Backup/Restore** (Home tab, next to Refresh) to: - **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `TOKEN_ENCRYPTION_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. @@ -9,6 +11,10 @@ Use **Backup/Restore** (Home tab, next to Refresh) to: After restore, Home tab caches are cleared so the next Refresh shows current data. +## Reset Database + +Setting **`ENABLE_DB_RESET=true`** (with **`PRIMARY_WORKSPACE`** matching the current workspace) shows a **Reset Database** button on the Home tab. This is an advanced/destructive feature -- it drops and reinitializes the entire database. The deploy scripts do not prompt for it; set it manually via your provider's env/secret configuration (e.g. 
AWS CloudFormation `EnableDbReset` parameter, GCP Terraform `enable_db_reset` variable, or GitHub Actions `ENABLE_DB_RESET` variable). + ## Workspace Data Migration (Federation) When **External Connections** is enabled, **Data Migration** (in that section) lets you: diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 76bf975..1568b72 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -56,7 +56,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy | `SLACK_USER_SCOPES` | Comma-separated OAuth **user** scopes. Must match `oauth_config.scopes.user` and `syncbot/slack_manifest_scopes.py` `USER_SCOPES`. If this env requests scopes that are not declared on the Slack app, install fails with `invalid_scope`. | | `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. | -**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`EnableDbReset`** → **`ENABLE_DB_RESET`**; optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). **`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. 
GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. +**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`PrimaryWorkspace`** → **`PRIMARY_WORKSPACE`**; **`EnableDbReset`** → **`ENABLE_DB_RESET`** (boolean `true` when enabled); optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). **`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. 
GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`primary_workspace`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. ### Optional @@ -64,7 +64,8 @@ poetry export --only main --format requirements.txt --without-hashes --output sy |----------|-------------| | `SLACK_BOT_TOKEN` | Set by OAuth flow; placeholder until first install. | | `REQUIRE_ADMIN` | `true` (default) or `false`; restricts config to admins/owners. | -| `ENABLE_DB_RESET` | When set to a Slack Team ID, enables the Reset Database button for that workspace. | +| `PRIMARY_WORKSPACE` | Optional Slack Team ID. When set, full backup/restore and DB reset (if enabled) are scoped to this workspace only. | +| `ENABLE_DB_RESET` | When `true` / `1` / `yes` and `PRIMARY_WORKSPACE` matches the current workspace, shows the Reset Database button. Not prompted during deploy; set manually via infra config or GitHub Actions variable. | | `LOCAL_DEVELOPMENT` | `true` only for local dev; disables token verification and enables dev shortcuts. | | `LOG_LEVEL` | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default `INFO`). | | `PORT` | HTTP listen port for container entrypoint (`python app.py` / Cloud Run). Cloud Run injects this (typically `8080`); default `3000` when unset. 
| diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 0fa6013..70aa1b8 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -1195,6 +1195,7 @@ PREV_SOFT_DELETE="" PREV_FEDERATION="" PREV_INSTANCE_ID="" PREV_PUBLIC_URL="" +PREV_PRIMARY_WORKSPACE="" PREV_ENABLE_DB_RESET="" PREV_DB_TLS="" PREV_DB_SSL_CA="" @@ -1222,6 +1223,7 @@ if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then PREV_FEDERATION="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotFederationEnabled")" PREV_INSTANCE_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotInstanceId")" PREV_PUBLIC_URL="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotPublicUrl")" + PREV_PRIMARY_WORKSPACE="$(stack_param_value "$EXISTING_STACK_PARAMS" "PrimaryWorkspace")" PREV_ENABLE_DB_RESET="$(stack_param_value "$EXISTING_STACK_PARAMS" "EnableDbReset")" PREV_DB_TLS="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseTlsEnabled")" PREV_DB_SSL_CA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSslCaPath")" @@ -1550,6 +1552,7 @@ SOFT_DELETE_RETENTION_DAYS="${PREV_SOFT_DELETE:-30}" SYNCBOT_FEDERATION_ENABLED="${PREV_FEDERATION:-false}" SYNCBOT_INSTANCE_ID="${PREV_INSTANCE_ID:-}" SYNCBOT_PUBLIC_URL="${PREV_PUBLIC_URL:-}" +PRIMARY_WORKSPACE="${PREV_PRIMARY_WORKSPACE:-}" ENABLE_DB_RESET="${PREV_ENABLE_DB_RESET:-}" DATABASE_TLS_ENABLED="${PREV_DB_TLS:-}" DATABASE_SSL_CA_PATH="${PREV_DB_SSL_CA:-}" @@ -1562,7 +1565,7 @@ echo echo "=== App Settings ===" REQUIRE_ADMIN="$(prompt_require_admin "$REQUIRE_ADMIN")" SOFT_DELETE_RETENTION_DAYS="$(prompt_soft_delete_retention_days "$SOFT_DELETE_RETENTION_DAYS")" -ENABLE_DB_RESET="$(prompt_enable_db_reset "$ENABLE_DB_RESET")" +PRIMARY_WORKSPACE="$(prompt_primary_workspace "$PRIMARY_WORKSPACE")" SYNCBOT_FEDERATION_ENABLED="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_ENABLED")" if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then SYNCBOT_INSTANCE_ID="$(prompt_instance_id 
"$SYNCBOT_INSTANCE_ID")" @@ -1577,10 +1580,15 @@ echo "Stage: $STAGE" echo "Log level: $LOG_LEVEL" echo "Require admin: $REQUIRE_ADMIN" echo "Soft-delete days: $SOFT_DELETE_RETENTION_DAYS" -if [[ -n "$ENABLE_DB_RESET" ]]; then - echo "DB reset (team): $ENABLE_DB_RESET" +if [[ -n "$PRIMARY_WORKSPACE" ]]; then + echo "Primary workspace: $PRIMARY_WORKSPACE" else - echo "DB reset (team): (disabled)" + echo "Primary workspace: (any — backup from all workspaces)" +fi +if [[ "$ENABLE_DB_RESET" == "true" ]]; then + echo "DB reset: enabled (PRIMARY_WORKSPACE must match)" +else + echo "DB reset: (disabled)" fi if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then echo "Federation: enabled" @@ -1650,6 +1658,7 @@ PARAMS=( # SAM rejects Key= (empty value) in shorthand format; only include when non-empty. [[ -n "$SYNCBOT_INSTANCE_ID" ]] && PARAMS+=("SyncbotInstanceId=$SYNCBOT_INSTANCE_ID") [[ -n "$SYNCBOT_PUBLIC_URL" ]] && PARAMS+=("SyncbotPublicUrl=$SYNCBOT_PUBLIC_URL") +[[ -n "$PRIMARY_WORKSPACE" ]] && PARAMS+=("PrimaryWorkspace=$PRIMARY_WORKSPACE") [[ -n "$ENABLE_DB_RESET" ]] && PARAMS+=("EnableDbReset=$ENABLE_DB_RESET") [[ -n "$DATABASE_TLS_ENABLED" ]] && PARAMS+=("DatabaseTlsEnabled=$DATABASE_TLS_ENABLED") [[ -n "$DATABASE_SSL_CA_PATH" ]] && PARAMS+=("DatabaseSslCaPath=$DATABASE_SSL_CA_PATH") diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index 7b1f785..d52d97b 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -241,9 +241,16 @@ Parameters: Type: String Default: "" + PrimaryWorkspace: + Description: > + Slack Team ID for PRIMARY_WORKSPACE (scopes backup/restore and DB reset). Leave empty to allow backup from all workspaces. + Type: String + Default: "" + EnableDbReset: Description: > - Slack Team ID to scope the Reset Database button (ENABLE_DB_RESET). Leave empty to disable. + Set to "true" to enable Reset Database when PRIMARY_WORKSPACE matches (ENABLE_DB_RESET). Leave empty to disable. + Legacy values (e.g. 
a Slack Team ID) are ignored by the app until updated to "true". Type: String Default: "" @@ -284,7 +291,8 @@ Conditions: HasAppDbPasswordOverride: !Not [!Equals [!Ref AppDbPasswordOverride, ""]] HasNoAppDbPasswordOverride: !Not [!Condition HasAppDbPasswordOverride] HasSyncbotPublicUrlOverride: !Not [!Equals [!Ref SyncbotPublicUrl, ""]] - HasEnableDbReset: !Not [!Equals [!Ref EnableDbReset, ""]] + HasPrimaryWorkspace: !Not [!Equals [!Ref PrimaryWorkspace, ""]] + HasEnableDbReset: !Equals [!Ref EnableDbReset, "true"] HasDatabaseTlsExplicit: !Not [!Equals [!Ref DatabaseTlsEnabled, ""]] HasDatabaseSslCaPath: !Not [!Equals [!Ref DatabaseSslCaPath, ""]] CreateTokenEncryptionKeySecret: !And @@ -713,10 +721,14 @@ Resources: SYNCBOT_FEDERATION_ENABLED: !Ref SyncbotFederationEnabled SYNCBOT_INSTANCE_ID: !Ref SyncbotInstanceId SYNCBOT_PUBLIC_URL: !Ref SyncbotPublicUrl + PRIMARY_WORKSPACE: !If + - HasPrimaryWorkspace + - !Ref PrimaryWorkspace + - !Ref AWS::NoValue ENABLE_DB_RESET: !If - HasEnableDbReset - - !Ref EnableDbReset - - "" + - "true" + - !Ref AWS::NoValue DATABASE_TLS_ENABLED: !If - HasDatabaseTlsExplicit - !Ref DatabaseTlsEnabled diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf index ac76934..8a34047 100644 --- a/infra/gcp/main.tf +++ b/infra/gcp/main.tf @@ -64,6 +64,7 @@ locals { }, var.syncbot_instance_id != "" ? { SYNCBOT_INSTANCE_ID = var.syncbot_instance_id } : {}, local.syncbot_public_url_effective != "" ? { SYNCBOT_PUBLIC_URL = trimsuffix(local.syncbot_public_url_effective, "/") } : {}, + trimspace(var.primary_workspace) != "" ? { PRIMARY_WORKSPACE = var.primary_workspace } : {}, trimspace(var.enable_db_reset) != "" ? { ENABLE_DB_RESET = var.enable_db_reset } : {}, var.database_tls_enabled != "" ? { DATABASE_TLS_ENABLED = var.database_tls_enabled } : {}, trimspace(var.database_ssl_ca_path) != "" ? 
{ DATABASE_SSL_CA_PATH = var.database_ssl_ca_path } : {}, diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index a1156d1..e73ba1f 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -647,6 +647,7 @@ SOFT_DELETE_DEFAULT="30" SYNCBOT_PUBLIC_DEFAULT="" SYNCBOT_FEDERATION_DEFAULT="false" INSTANCE_ID_VAR="" +PRIMARY_WORKSPACE_VAR="" ENABLE_DB_RESET_VAR="" DB_TLS_VAR="" DB_SSL_CA_VAR="" @@ -668,6 +669,8 @@ if [[ -n "$EXISTING_SERVICE_URL" ]]; then fi DETECTED_INSTANCE_ID="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_INSTANCE_ID")" INSTANCE_ID_VAR="${DETECTED_INSTANCE_ID:-}" + DETECTED_PW="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "PRIMARY_WORKSPACE")" + PRIMARY_WORKSPACE_VAR="${DETECTED_PW:-}" DETECTED_ER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "ENABLE_DB_RESET")" ENABLE_DB_RESET_VAR="${DETECTED_ER:-}" DETECTED_DB_TLS="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_TLS_ENABLED")" @@ -684,7 +687,7 @@ echo echo "=== App Settings ===" REQUIRE_ADMIN_DEFAULT="$(prompt_require_admin "$REQUIRE_ADMIN_DEFAULT")" SOFT_DELETE_DEFAULT="$(prompt_soft_delete_retention_days "$SOFT_DELETE_DEFAULT")" -ENABLE_DB_RESET_VAR="$(prompt_enable_db_reset "$ENABLE_DB_RESET_VAR")" +PRIMARY_WORKSPACE_VAR="$(prompt_primary_workspace "$PRIMARY_WORKSPACE_VAR")" SYNCBOT_FEDERATION_DEFAULT="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_DEFAULT")" if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then INSTANCE_ID_VAR="$(prompt_instance_id "$INSTANCE_ID_VAR")" @@ -709,6 +712,7 @@ VARS=( "-var=soft_delete_retention_days=$SOFT_DELETE_DEFAULT" "-var=syncbot_federation_enabled=$SYNCBOT_FEDERATION_DEFAULT" "-var=syncbot_instance_id=${INSTANCE_ID_VAR:-}" + "-var=primary_workspace=${PRIMARY_WORKSPACE_VAR:-}" "-var=enable_db_reset=${ENABLE_DB_RESET_VAR:-}" "-var=database_tls_enabled=${DB_TLS_VAR:-}" "-var=database_ssl_ca_path=${DB_SSL_CA_VAR:-}" @@ -732,10 +736,15 @@ 
echo echo "Require admin: $REQUIRE_ADMIN_DEFAULT" echo "Soft-delete days: $SOFT_DELETE_DEFAULT" echo "Log level: $LOG_LEVEL" -if [[ -n "$ENABLE_DB_RESET_VAR" ]]; then - echo "DB reset (team): $ENABLE_DB_RESET_VAR" +if [[ -n "$PRIMARY_WORKSPACE_VAR" ]]; then + echo "Primary workspace: $PRIMARY_WORKSPACE_VAR" else - echo "DB reset (team): (disabled)" + echo "Primary workspace: (any)" +fi +if [[ "$ENABLE_DB_RESET_VAR" == "true" ]]; then + echo "DB reset: enabled" +else + echo "DB reset: (disabled)" fi if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then echo "Federation: enabled" diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 6ce01c1..2e88006 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -228,10 +228,16 @@ variable "syncbot_public_url_override" { description = "SYNCBOT_PUBLIC_URL (HTTPS base, no path). Set after first deploy if using federation; empty omits the env var." } +variable "primary_workspace" { + type = string + default = "" + description = "PRIMARY_WORKSPACE Slack Team ID; empty omits the env var (backup available from all workspaces)." +} + variable "enable_db_reset" { type = string default = "" - description = "ENABLE_DB_RESET: Slack Team ID to scope Reset Database; empty omits the env var." + description = "ENABLE_DB_RESET: set to \"true\" for Reset Database when PRIMARY_WORKSPACE matches; empty omits." } variable "database_tls_enabled" { diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py index c6cf3fe..db9cb80 100644 --- a/syncbot/builders/home.py +++ b/syncbot/builders/home.py @@ -38,7 +38,7 @@ def _home_tab_content_hash(workspace_record: Workspace) -> str: Includes groups, members, syncs, sync channels (id/workspace/status), mapped counts, pending invite ids, and reset-button visibility so the hash changes when anything - visible on Home changes (including ENABLE_DB_RESET / team_id for the Reset button). + visible on Home changes (including PRIMARY_WORKSPACE / ENABLE_DB_RESET for Reset). 
""" workspace_id = workspace_record.id workspace_name = (workspace_record.workspace_name or "") or "" @@ -233,11 +233,14 @@ def build_home_tab( label="Refresh", action=actions.CONFIG_REFRESH_HOME, ), - orm.ButtonElement( - label="Backup/Restore", - action=actions.CONFIG_BACKUP_RESTORE, - ), ] + if helpers.is_backup_visible_for_workspace(workspace_record.team_id): + config_buttons.append( + orm.ButtonElement( + label="Backup/Restore", + action=actions.CONFIG_BACKUP_RESTORE, + ), + ) if helpers.is_db_reset_visible_for_workspace(workspace_record.team_id): config_buttons.append( orm.ButtonElement( diff --git a/syncbot/constants.py b/syncbot/constants.py index 826ba65..5c670a0 100644 --- a/syncbot/constants.py +++ b/syncbot/constants.py @@ -42,7 +42,10 @@ DATABASE_SSL_CA_PATH = "DATABASE_SSL_CA_PATH" DATABASE_TLS_ENABLED = "DATABASE_TLS_ENABLED" -# Name of env var that scopes the Reset Database button to one workspace. +# Slack Team ID of the primary workspace (backup/restore and DB reset when enabled). +PRIMARY_WORKSPACE = "PRIMARY_WORKSPACE" + +# When "true"/"1"/"yes" and PRIMARY_WORKSPACE matches, show Reset Database on Home. ENABLE_DB_RESET = "ENABLE_DB_RESET" # --------------------------------------------------------------------------- diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py index b9cf451..63fb4df 100644 --- a/syncbot/db/__init__.py +++ b/syncbot/db/__init__.py @@ -278,7 +278,7 @@ def drop_and_init_db() -> None: """Empty the current schema and reinitialize via Alembic. All data is lost. Drops all tables dialect-aware, then runs Alembic upgrade head. - Called from the "Reset Database" UI button (gated by ENABLE_DB_RESET). + Called from the "Reset Database" UI button (gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET). Resets GLOBAL_ENGINE and GLOBAL_SESSION so the next get_engine() uses a fresh DB. 
""" global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py index 2d8bf3d..45148c0 100644 --- a/syncbot/handlers/export_import.py +++ b/syncbot/handlers/export_import.py @@ -60,6 +60,15 @@ def _is_admin(client: WebClient, user_id: str, body: dict) -> bool: return helpers.is_user_authorized(client, user_id) +def _team_id_for_backup_gate(body: dict) -> str | None: + """Slack team_id for primary-workspace backup/restore gating.""" + return ( + helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team_id") + ) + + def _open_dm_channel(client: WebClient, user_id: str) -> str: """Open (or reopen) a DM with *user_id* and return the channel ID.""" resp = client.conversations_open(users=[user_id]) @@ -81,6 +90,8 @@ def handle_backup_restore( user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return trigger_id = helpers.safe_get(body, "trigger_id") if not trigger_id: return @@ -142,6 +153,8 @@ def handle_backup_download( user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return try: payload = ei.build_full_backup() json_str = json.dumps(payload, default=ei._json_serializer, indent=2) @@ -187,6 +200,8 @@ def handle_backup_restore_submit_ack( user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return None + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return None values = helpers.safe_get(body, "view", "state", "values") or {} file_data = helpers.safe_get( @@ -294,6 +309,8 @@ def 
handle_backup_restore_submit_work( user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return values = helpers.safe_get(body, "view", "state", "values") or {} file_data = helpers.safe_get( @@ -338,6 +355,8 @@ def handle_backup_restore_proceed( user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) if not _is_admin(client, user_id, body): return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return from helpers._cache import _cache_get data = _cache_get(f"restore_pending:{user_id}") diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py index c63eb9d..4f8878a 100644 --- a/syncbot/handlers/sync.py +++ b/syncbot/handlers/sync.py @@ -433,7 +433,7 @@ def check_join_sync_channel( # --------------------------------------------------------------------------- -# Database Reset (gated by ENABLE_DB_RESET) +# Database Reset (gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET) # --------------------------------------------------------------------------- @@ -443,7 +443,10 @@ def handle_db_reset( logger: Logger, context: dict, ) -> None: - """Open a confirmation modal warning the user before a full DB reset. Only for the workspace whose team_id matches ENABLE_DB_RESET.""" + """Open a confirmation modal warning the user before a full DB reset. + + Only when PRIMARY_WORKSPACE matches and ENABLE_DB_RESET is truthy (see helpers.core). + """ team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") if not helpers.is_db_reset_visible_for_workspace(team_id): return @@ -498,7 +501,10 @@ def handle_db_reset_proceed( logger: Logger, context: dict, ) -> None: - """Execute the database reset after user confirmed via modal. 
Only for the workspace whose team_id matches ENABLE_DB_RESET.""" + """Execute the database reset after user confirmed via modal. + + Same gating as handle_db_reset (PRIMARY_WORKSPACE + ENABLE_DB_RESET). + """ team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") if not helpers.is_db_reset_visible_for_workspace(team_id): return diff --git a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py index f4e784e..2619f7d 100644 --- a/syncbot/helpers/__init__.py +++ b/syncbot/helpers/__init__.py @@ -18,6 +18,7 @@ format_admin_label, get_request_type, get_user_id_from_body, + is_backup_visible_for_workspace, is_db_reset_visible_for_workspace, is_user_authorized, safe_get, @@ -128,6 +129,7 @@ "get_workspace_record", "index_of_block_with_action", "inject_cooldown_message", + "is_backup_visible_for_workspace", "is_db_reset_visible_for_workspace", "is_user_authorized", "notify_admins_dm", diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index e382ed0..a756a21 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -55,27 +55,41 @@ def is_user_authorized(client, user_id: str) -> bool: return bool(user.get("is_admin") or user.get("is_owner")) -def is_db_reset_visible_for_workspace(team_id: str | None) -> bool: - """Return True if the DB reset button/action is allowed for this workspace. +def is_backup_visible_for_workspace(team_id: str | None) -> bool: + """Return True if full backup/restore UI and handlers are allowed for this workspace. - When ENABLE_DB_RESET is set to a Slack team ID, only that workspace may see - and use the Reset Database button; other workspaces cannot. - Reads ENABLE_DB_RESET from os.environ at call time so it is correct even - if .env was loaded after constants was first imported. + When PRIMARY_WORKSPACE is set, only that Slack team_id may use backup/restore. + When unset, backup is available from all workspaces (backward-compatible). 
""" - enabled = (os.environ.get(constants.ENABLE_DB_RESET) or "").strip() - if not enabled: - _logger.debug("DB reset button hidden: ENABLE_DB_RESET not set") - return False - visible = (team_id or "") == enabled + primary = (os.environ.get(constants.PRIMARY_WORKSPACE) or "").strip() + if not primary: + return True + visible = (team_id or "") == primary if not visible: _logger.debug( - "DB reset button hidden: team_id %r does not match ENABLE_DB_RESET", + "backup/restore hidden: team_id %r does not match PRIMARY_WORKSPACE", team_id, ) return visible +def is_db_reset_visible_for_workspace(team_id: str | None) -> bool: + """Return True if the DB reset button/action is allowed for this workspace. + + Requires PRIMARY_WORKSPACE to match *team_id* and ENABLE_DB_RESET to be a truthy + boolean string (``true``, ``1``, ``yes``). Reads env at call time. + """ + primary = (os.environ.get(constants.PRIMARY_WORKSPACE) or "").strip() + if not primary or (team_id or "") != primary: + _logger.debug("DB reset button hidden: PRIMARY_WORKSPACE unset or team_id mismatch") + return False + enabled = (os.environ.get(constants.ENABLE_DB_RESET) or "").strip().lower() + if enabled not in ("true", "1", "yes"): + _logger.debug("DB reset button hidden: ENABLE_DB_RESET not true") + return False + return True + + def format_admin_label(client, user_id: str, workspace) -> tuple[str, str]: """Return ``(display_name, full_label)`` for an admin.""" from .slack_api import get_user_info diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py index 13c7bb5..308ce0e 100644 --- a/syncbot/slack/actions.py +++ b/syncbot/slack/actions.py @@ -212,7 +212,7 @@ """Action: user clicked "Remove Connection" on an external connection (prefix-matched).""" # --------------------------------------------------------------------------- -# Database Reset (dev/admin tool, gated by ENABLE_DB_RESET env var) +# Database Reset (dev/admin tool, gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET) # 
--------------------------------------------------------------------------- CONFIG_DB_RESET = "db_reset" diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py index cd25d56..9967ee6 100644 --- a/tests/test_export_import_handlers.py +++ b/tests/test_export_import_handlers.py @@ -9,7 +9,10 @@ os.environ.setdefault("DATABASE_SCHEMA", "syncbot") os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") -from handlers.export_import import handle_backup_restore_submit_ack # noqa: E402 +from handlers.export_import import ( # noqa: E402 + handle_backup_restore, + handle_backup_restore_submit_ack, +) from slack import actions # noqa: E402 @@ -18,7 +21,10 @@ def test_returns_error_when_file_missing(self): client = MagicMock() body = {"user": {"id": "U1"}, "view": {"state": {"values": {}}}} - with patch("handlers.export_import._is_admin", return_value=True): + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}), + patch("handlers.export_import._is_admin", return_value=True), + ): resp = handle_backup_restore_submit_ack(body, client, context={}) assert resp["response_action"] == "errors" @@ -41,8 +47,28 @@ def test_returns_error_when_uploaded_file_has_no_url(self): }, } - with patch("handlers.export_import._is_admin", return_value=True): + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}), + patch("handlers.export_import._is_admin", return_value=True), + ): resp = handle_backup_restore_submit_ack(body, client, context={}) assert resp["response_action"] == "errors" assert "Could not retrieve the uploaded file." 
in resp["errors"][actions.CONFIG_BACKUP_RESTORE_JSON_INPUT] + + +class TestHandleBackupRestorePrimaryWorkspace: + def test_returns_early_when_primary_mismatch(self): + client = MagicMock() + body = { + "user": {"id": "U1"}, + "team": {"id": "T_WRONG"}, + "trigger_id": "trig", + } + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "T_PRIMARY"}), + patch("handlers.export_import._is_admin", return_value=True), + ): + handle_backup_restore(body, client, MagicMock(), {}) + + client.views_open.assert_not_called() diff --git a/tests/test_primary_workspace_gates.py b/tests/test_primary_workspace_gates.py new file mode 100644 index 0000000..774a04c --- /dev/null +++ b/tests/test_primary_workspace_gates.py @@ -0,0 +1,56 @@ +"""Tests for PRIMARY_WORKSPACE backup gate and ENABLE_DB_RESET boolean.""" + +import os +from unittest.mock import patch + +import pytest + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from helpers.core import ( # noqa: E402 + is_backup_visible_for_workspace, + is_db_reset_visible_for_workspace, +) + + +class TestIsBackupVisibleForWorkspace: + def test_unset_primary_allows_all(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}): + assert is_backup_visible_for_workspace("T111") is True + assert is_backup_visible_for_workspace(None) is True + + def test_matching_team_allowed(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}): + assert is_backup_visible_for_workspace("TABC123") is True + + def test_non_matching_team_denied(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}): + assert is_backup_visible_for_workspace("TOTHER") is False + + +class TestIsDbResetVisibleForWorkspace: + def test_unset_primary_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "", "ENABLE_DB_RESET": 
"true"}): + assert is_db_reset_visible_for_workspace("T111") is False + + def test_primary_match_and_true_enables(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": "true"}): + assert is_db_reset_visible_for_workspace("TABC123") is True + + @pytest.mark.parametrize("truthy", ("true", "1", "yes")) + def test_truthy_strings(self, truthy: str): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": truthy}): + assert is_db_reset_visible_for_workspace("TABC123") is True + + def test_unset_enable_db_reset_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}, clear=False): + os.environ.pop("ENABLE_DB_RESET", None) + assert is_db_reset_visible_for_workspace("TABC123") is False + + def test_team_mismatch_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": "true"}): + assert is_db_reset_visible_for_workspace("TOTHER") is False From f5522779771a3375a70c7214227bdc149e035e7c Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 20:08:33 -0500 Subject: [PATCH 42/45] Correction to show backup/restore logic. --- .env.example | 4 ++-- CHANGELOG.md | 2 +- deploy.sh | 4 ++-- docs/BACKUP_AND_MIGRATION.md | 2 +- docs/INFRA_CONTRACT.md | 2 +- infra/aws/scripts/deploy.sh | 2 +- infra/aws/template.yaml | 2 +- infra/gcp/scripts/deploy.sh | 2 +- infra/gcp/variables.tf | 2 +- syncbot/helpers/core.py | 7 ++++--- tests/test_export_import_handlers.py | 7 ++++--- tests/test_primary_workspace_gates.py | 6 +++--- 12 files changed, 22 insertions(+), 20 deletions(-) diff --git a/.env.example b/.env.example index 3b3fd9b..026f186 100644 --- a/.env.example +++ b/.env.example @@ -33,8 +33,8 @@ DATABASE_SCHEMA=syncbot # DATABASE_BACKEND=sqlite # DATABASE_URL=sqlite:///syncbot.db -# Slack Team ID of the primary workspace. When set, full backup/restore and DB -# reset (when enabled below) are only available from this workspace. 
+# Slack Team ID of the primary workspace. Required for backup/restore to appear. +# DB reset (when enabled below) is also scoped to this workspace. # PRIMARY_WORKSPACE=T0123456789 # When true (and PRIMARY_WORKSPACE matches), show "Reset Database" on the Home tab. diff --git a/CHANGELOG.md b/CHANGELOG.md index b763942..85d610e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added -- `PRIMARY_WORKSPACE` env var: when set to a Slack Team ID, restricts full backup/restore and DB reset to that workspace only. +- `PRIMARY_WORKSPACE` env var: must be set to a Slack Team ID for backup/restore to appear. Also scopes DB reset to that workspace. ## [1.0.0] - 2026-03-25 diff --git a/deploy.sh b/deploy.sh index e1e87f4..34c59ad 100755 --- a/deploy.sh +++ b/deploy.sh @@ -274,8 +274,8 @@ prompt_soft_delete_retention_days() { prompt_primary_workspace() { local default="$1" - echo "Optional Slack Team ID for PRIMARY_WORKSPACE (scopes backup/restore and DB reset to one workspace)." >&2 - echo "Leave empty so backup/restore is available from every workspace." >&2 + echo "Slack Team ID for PRIMARY_WORKSPACE (required for backup/restore to appear; also scopes DB reset)." >&2 + echo "Leave empty to hide backup/restore from all workspaces." >&2 local disp if [[ -z "$default" ]]; then disp="(any workspace)" diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md index a65cd5d..ce9de9b 100644 --- a/docs/BACKUP_AND_MIGRATION.md +++ b/docs/BACKUP_AND_MIGRATION.md @@ -2,7 +2,7 @@ ## Full-Instance Backup and Restore -When **`PRIMARY_WORKSPACE`** is set to a Slack Team ID, the **Backup/Restore** button is only shown in that workspace. This prevents admins in other workspaces from downloading a full-instance backup. When **`PRIMARY_WORKSPACE`** is unset, backup/restore is available from every workspace (legacy behavior). 
+**`PRIMARY_WORKSPACE`** must be set to a Slack Team ID for backup/restore to be available. When set, the **Backup/Restore** button is only shown in that workspace. When unset, backup/restore is hidden everywhere. Use **Backup/Restore** (Home tab, next to Refresh) to: diff --git a/docs/INFRA_CONTRACT.md b/docs/INFRA_CONTRACT.md index 1568b72..216dabe 100644 --- a/docs/INFRA_CONTRACT.md +++ b/docs/INFRA_CONTRACT.md @@ -64,7 +64,7 @@ poetry export --only main --format requirements.txt --without-hashes --output sy |----------|-------------| | `SLACK_BOT_TOKEN` | Set by OAuth flow; placeholder until first install. | | `REQUIRE_ADMIN` | `true` (default) or `false`; restricts config to admins/owners. | -| `PRIMARY_WORKSPACE` | Optional Slack Team ID. When set, full backup/restore and DB reset (if enabled) are scoped to this workspace only. | +| `PRIMARY_WORKSPACE` | Slack Team ID of the primary workspace. Required for backup/restore to be visible. DB reset (if enabled) is also scoped to this workspace. | | `ENABLE_DB_RESET` | When `true` / `1` / `yes` and `PRIMARY_WORKSPACE` matches the current workspace, shows the Reset Database button. Not prompted during deploy; set manually via infra config or GitHub Actions variable. | | `LOCAL_DEVELOPMENT` | `true` only for local dev; disables token verification and enables dev shortcuts. | | `LOG_LEVEL` | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default `INFO`). 
| diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh index 70aa1b8..0380d76 100755 --- a/infra/aws/scripts/deploy.sh +++ b/infra/aws/scripts/deploy.sh @@ -1583,7 +1583,7 @@ echo "Soft-delete days: $SOFT_DELETE_RETENTION_DAYS" if [[ -n "$PRIMARY_WORKSPACE" ]]; then echo "Primary workspace: $PRIMARY_WORKSPACE" else - echo "Primary workspace: (any — backup from all workspaces)" + echo "Primary workspace: (not set — backup/restore hidden)" fi if [[ "$ENABLE_DB_RESET" == "true" ]]; then echo "DB reset: enabled (PRIMARY_WORKSPACE must match)" diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml index d52d97b..dc57999 100644 --- a/infra/aws/template.yaml +++ b/infra/aws/template.yaml @@ -243,7 +243,7 @@ Parameters: PrimaryWorkspace: Description: > - Slack Team ID for PRIMARY_WORKSPACE (scopes backup/restore and DB reset). Leave empty to allow backup from all workspaces. + Slack Team ID for PRIMARY_WORKSPACE. Required for backup/restore to appear; also scopes DB reset. Leave empty to hide backup/restore. Type: String Default: "" diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh index e73ba1f..90af4ec 100755 --- a/infra/gcp/scripts/deploy.sh +++ b/infra/gcp/scripts/deploy.sh @@ -739,7 +739,7 @@ echo "Log level: $LOG_LEVEL" if [[ -n "$PRIMARY_WORKSPACE_VAR" ]]; then echo "Primary workspace: $PRIMARY_WORKSPACE_VAR" else - echo "Primary workspace: (any)" + echo "Primary workspace: (not set — backup/restore hidden)" fi if [[ "$ENABLE_DB_RESET_VAR" == "true" ]]; then echo "DB reset: enabled" diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf index 2e88006..9237f90 100644 --- a/infra/gcp/variables.tf +++ b/infra/gcp/variables.tf @@ -231,7 +231,7 @@ variable "syncbot_public_url_override" { variable "primary_workspace" { type = string default = "" - description = "PRIMARY_WORKSPACE Slack Team ID; empty omits the env var (backup available from all workspaces)." 
+ description = "PRIMARY_WORKSPACE Slack Team ID; required for backup/restore to appear. Empty omits the env var and hides backup/restore." } variable "enable_db_reset" { diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py index a756a21..d3e7dc7 100644 --- a/syncbot/helpers/core.py +++ b/syncbot/helpers/core.py @@ -58,12 +58,13 @@ def is_user_authorized(client, user_id: str) -> bool: def is_backup_visible_for_workspace(team_id: str | None) -> bool: """Return True if full backup/restore UI and handlers are allowed for this workspace. - When PRIMARY_WORKSPACE is set, only that Slack team_id may use backup/restore. - When unset, backup is available from all workspaces (backward-compatible). + Requires PRIMARY_WORKSPACE to be set and to match *team_id*. + When PRIMARY_WORKSPACE is unset, backup/restore is hidden everywhere. """ primary = (os.environ.get(constants.PRIMARY_WORKSPACE) or "").strip() if not primary: - return True + _logger.debug("backup/restore hidden: PRIMARY_WORKSPACE not set") + return False visible = (team_id or "") == primary if not visible: _logger.debug( diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py index 9967ee6..619fe24 100644 --- a/tests/test_export_import_handlers.py +++ b/tests/test_export_import_handlers.py @@ -19,10 +19,10 @@ class TestBackupRestoreSubmitValidation: def test_returns_error_when_file_missing(self): client = MagicMock() - body = {"user": {"id": "U1"}, "view": {"state": {"values": {}}}} + body = {"user": {"id": "U1"}, "team": {"id": "TTEST"}, "view": {"state": {"values": {}}}} with ( - patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}), + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TTEST"}), patch("handlers.export_import._is_admin", return_value=True), ): resp = handle_backup_restore_submit_ack(body, client, context={}) @@ -34,6 +34,7 @@ def test_returns_error_when_uploaded_file_has_no_url(self): client = MagicMock() body = { "user": {"id": "U1"}, + "team": {"id": "TTEST"}, 
"view": { "state": { "values": { @@ -48,7 +49,7 @@ def test_returns_error_when_uploaded_file_has_no_url(self): } with ( - patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}), + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TTEST"}), patch("handlers.export_import._is_admin", return_value=True), ): resp = handle_backup_restore_submit_ack(body, client, context={}) diff --git a/tests/test_primary_workspace_gates.py b/tests/test_primary_workspace_gates.py index 774a04c..2c766f1 100644 --- a/tests/test_primary_workspace_gates.py +++ b/tests/test_primary_workspace_gates.py @@ -18,10 +18,10 @@ class TestIsBackupVisibleForWorkspace: - def test_unset_primary_allows_all(self): + def test_unset_primary_denies_all(self): with patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}): - assert is_backup_visible_for_workspace("T111") is True - assert is_backup_visible_for_workspace(None) is True + assert is_backup_visible_for_workspace("T111") is False + assert is_backup_visible_for_workspace(None) is False def test_matching_team_allowed(self): with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}): From c25212cf10240a06b6230b58f36af2d89bcc09e6 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 21:52:08 -0500 Subject: [PATCH 43/45] Changes to cross-workspace channel links and fallback text. Fixed how link is resolved for channels in other workspaces. Changed formatting of fallback text for unresolved usernames and channels. 
--- CHANGELOG.md | 1 + docs/ARCHITECTURE.md | 1 + syncbot/builders/_common.py | 12 +++- syncbot/handlers/messages.py | 15 ++++- syncbot/helpers/user_matching.py | 39 +++++++++---- tests/test_helpers.py | 97 ++++++++++++++++++++++++++++++++ 6 files changed, 149 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 85d610e..094011f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- Cross-workspace channel links in synced messages use workspace archive URLs (`https://{domain}.slack.com/archives/{id}`) instead of `slack.com/app_redirect`, with a `[#channel (Workspace)]` fallback when the domain cannot be resolved. Federation outbound messages now resolve channel references the same way as same-instance sync. - `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID. Reset Database requires both `PRIMARY_WORKSPACE` to match the current workspace and `ENABLE_DB_RESET` to be truthy. 
### Added diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index 80e3c6e..e263cbb 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -47,6 +47,7 @@ sequenceDiagram loop For each target channel L->>L: Re-map @mentions (cached user matching) + L->>L: Resolve #channel references (archive URLs) L->>SB: chat.postMessage (as sender) SB-->>L: ts (timestamp) L->>DB: Save PostMeta record diff --git a/syncbot/builders/_common.py b/syncbot/builders/_common.py index 6b125d9..5097125 100644 --- a/syncbot/builders/_common.py +++ b/syncbot/builders/_common.py @@ -94,7 +94,7 @@ def _get_group_members(group_id: int) -> list[WorkspaceGroupMember]: def _get_workspace_info(workspace: Workspace) -> dict: """Fetch workspace icon URL and domain from the Slack API (cached 24h).""" - result: dict[str, str | None] = {"icon_url": None, "domain": None} + result: dict[str, str | None] = {"icon_url": None, "domain": None, "raw_domain": None} if not workspace or not workspace.bot_token: return result @@ -112,6 +112,7 @@ def _get_workspace_info(workspace: Workspace) -> dict: domain = helpers.safe_get(info, "team", "domain") if domain: result["domain"] = f"" + result["raw_domain"] = domain helpers._cache_set(cache_key, result, ttl=86400) except Exception as exc: _logger.debug(f"_get_workspace_meta: team_info call failed: {exc}") @@ -150,9 +151,14 @@ def _format_channel_ref( extra={"channel_id": channel_id, "workspace": ws_name, "error": str(e)}, ) - deep_link = f"https://slack.com/app_redirect?channel={channel_id}&team={workspace.team_id}" + ws_info = _get_workspace_info(workspace) + domain = ws_info.get("raw_domain") link_text = f"#{ch_name} ({ws_name})" if include_workspace_in_link else f"#{ch_name}" - result = f"<{deep_link}|{link_text}>" + if domain: + deep_link = f"https://{domain}.slack.com/archives/{channel_id}" + result = f"<{deep_link}|{link_text}>" + else: + result = f"`[{link_text}]`" if ch_name != channel_id: helpers._cache_set(cache_key, result, ttl=3600) return 
result diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 9cae837..887e91d 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -201,6 +201,9 @@ def _handle_new_post( if sync_records: fed_ws = helpers.get_federated_workspace_for_sync(sync_records[0][0].sync_id) + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + for sync_channel, workspace in sync_records: try: split_file_ts: str | None = None @@ -223,7 +226,7 @@ def _handle_new_post( user_name=user_name, user_avatar_url=user_profile_url, workspace_name=workspace_name, - text=msg_text, + text=fed_adapted_text, images=image_payloads, timestamp=helpers.safe_get(body, "event", "ts"), ) @@ -364,6 +367,9 @@ def _handle_thread_reply( thread_post_id = post_records[0][0].post_id if post_records else None + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + for post_meta, sync_channel, workspace in post_records: try: split_file_ts: str | None = None @@ -377,7 +383,7 @@ def _handle_thread_reply( user_name=user_name, user_avatar_url=user_profile_url, workspace_name=workspace_name, - text=msg_text, + text=fed_adapted_text, thread_post_id=str(thread_post_id) if thread_post_id else None, timestamp=helpers.safe_get(body, "event", "ts"), ) @@ -507,6 +513,9 @@ def _handle_message_edit( if post_records: fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id) + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + synced = 0 failed = 0 for post_meta, sync_channel, workspace in post_records: @@ -517,7 +526,7 @@ def _handle_message_edit( payload = 
federation.build_edit_payload( post_id=post_meta.post_id.hex() if isinstance(post_meta.post_id, bytes) else str(post_meta.post_id), channel_id=sync_channel.channel_id, - text=msg_text, + text=fed_adapted_text, timestamp=f"{post_meta.ts:.6f}", ) federation.push_edit(fed_ws, payload) diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py index 809673a..06dd133 100644 --- a/syncbot/helpers/user_matching.py +++ b/syncbot/helpers/user_matching.py @@ -374,8 +374,8 @@ def resolve_mention_for_workspace( def _unmapped_label(name: str) -> str: if source_ws_name: - return f"[{name} ({source_ws_name})]" - return f"[{name}]" + return f"`[@{name} ({source_ws_name})]`" + return f"`[@{name}]`" mappings = DbManager.find_records( schemas.UserMapping, @@ -481,14 +481,32 @@ def apply_mentioned_users( source_ws = get_workspace_by_id(source_workspace_id) if source_workspace_id else None ws_label = resolve_workspace_name(source_ws) if source_ws else None if ws_label: - replace_list.append(f"[{fallback} ({ws_label})]") + replace_list.append(f"`[@{fallback} ({ws_label})]`") else: - replace_list.append(f"[{fallback}]") + replace_list.append(f"`[@{fallback}]`") replace_iter = iter(replace_list) return re.sub(r"<@\w+>", lambda _: next(replace_iter), msg_text) +def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: + """Return the workspace subdomain (e.g. 
``acme`` for ``acme.slack.com``) from ``team.info``, cached.""" + cache_key = f"ws_domain:{team_id}" + cached = _cache_get(cache_key) + if cached: + return cached + + try: + info = client.team_info() + domain = safe_get(info, "team", "domain") + if domain: + _cache_set(cache_key, domain, ttl=86400) + return domain + except Exception as exc: + _logger.debug("get_workspace_domain_failed", extra={"team_id": team_id, "error": str(exc)}) + return None + + def resolve_channel_references( msg_text: str, source_client: WebClient, @@ -519,13 +537,14 @@ def resolve_channel_references( extra={"channel_id": ch_id, "error": str(exc)}, ) - if team_id and ch_name != ch_id: - deep_link = f"https://slack.com/app_redirect?channel={ch_id}&team={team_id}" + if ch_name != ch_id: label = f"#{ch_name} ({ws_name})" if ws_name else f"#{ch_name}" - replacement = f"<{deep_link}|{label}>" - elif ch_name != ch_id: - label = f"#{ch_name} ({ws_name})" if ws_name else f"#{ch_name}" - replacement = label + domain = _get_workspace_domain(source_client, team_id) if team_id else None + if domain: + deep_link = f"https://{domain}.slack.com/archives/{ch_id}" + replacement = f"<{deep_link}|{label}>" + else: + replacement = f"`[{label}]`" else: replacement = f"#{ch_id}" diff --git a/tests/test_helpers.py b/tests/test_helpers.py index e268ccf..8bea143 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -241,3 +241,100 @@ def fn(): with pytest.raises(SlackApiError): fn() + + +# ----------------------------------------------------------------------- +# resolve_channel_references +# ----------------------------------------------------------------------- + + +class TestResolveChannelReferences: + """Tests for helpers.resolve_channel_references (archive URL generation).""" + + def setup_method(self): + helpers._CACHE.clear() + + def _make_workspace(self, team_id="T123", name="Acme"): + ws = MagicMock() + ws.team_id = team_id + ws.workspace_name = name + return ws + + def _make_client(self, 
channel_name="general", domain="acme"): + client = MagicMock() + client.conversations_info.return_value = {"channel": {"name": channel_name}} + client.team_info.return_value = {"team": {"domain": domain}} + return client + + def test_no_channel_refs_unchanged(self): + result = helpers.resolve_channel_references("hello world", MagicMock()) + assert result == "hello world" + + def test_empty_text(self): + result = helpers.resolve_channel_references("", MagicMock()) + assert result == "" + + def test_none_text(self): + result = helpers.resolve_channel_references(None, MagicMock()) + assert result is None + + def test_archive_url_with_workspace(self): + client = self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert "https://acme.slack.com/archives/CABC123" in result + assert "#general (Acme)" in result + + def test_archive_url_without_workspace(self): + client = self._make_client(channel_name="general", domain="acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, None) + assert "#general" in result + + def test_fallback_when_domain_unavailable(self): + client = MagicMock() + client.conversations_info.return_value = {"channel": {"name": "general"}} + client.team_info.side_effect = Exception("api error") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert result == "see `[#general (Acme)]`" + assert "slack.com" not in result + + def test_fallback_when_channel_unresolvable(self): + client = MagicMock() + client.conversations_info.side_effect = Exception("channel_not_found") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert result == "see #CABC123" + + def test_channel_ref_with_label(self): + client = 
self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123|general>", client, ws) + assert "https://acme.slack.com/archives/CABC123" in result + + def test_multiple_channel_refs(self): + client = MagicMock() + call_count = 0 + + def conv_info(channel): + nonlocal call_count + call_count += 1 + names = {"CABC111": "alpha", "CABC222": "beta"} + return {"channel": {"name": names.get(channel, channel)}} + + client.conversations_info.side_effect = conv_info + client.team_info.return_value = {"team": {"domain": "acme"}} + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references( + "see <#CABC111> and <#CABC222>", client, ws + ) + assert "archives/CABC111" in result + assert "archives/CABC222" in result + assert "#alpha" in result + assert "#beta" in result + + def test_no_app_redirect_in_output(self): + client = self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert "app_redirect" not in result From 9b34766265827f30d063618557f930e6041c7a02 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 22:18:07 -0500 Subject: [PATCH 44/45] Changed messages with links to synced channels to reference the local channel. 
--- CHANGELOG.md | 4 +- docs/API_REFERENCE.md | 4 +- docs/ARCHITECTURE.md | 4 +- docs/USER_GUIDE.md | 5 +- syncbot/federation/api.py | 59 ++++++++++++- syncbot/handlers/messages.py | 12 ++- syncbot/helpers/__init__.py | 2 + syncbot/helpers/user_matching.py | 107 +++++++++++++++++++---- tests/test_federation_inbound_resolve.py | 76 ++++++++++++++++ tests/test_helpers.py | 28 ++++++ 10 files changed, 274 insertions(+), 27 deletions(-) create mode 100644 tests/test_federation_inbound_resolve.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 094011f..4891aeb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- Cross-workspace channel links in synced messages use workspace archive URLs (`https://{domain}.slack.com/archives/{id}`) instead of `slack.com/app_redirect`, with a `[#channel (Workspace)]` fallback when the domain cannot be resolved. Federation outbound messages now resolve channel references the same way as same-instance sync. +- Cross-workspace channel links in synced messages use workspace archive URLs (`https://{domain}.slack.com/archives/{id}`) instead of `slack.com/app_redirect`, with a code-formatted `[#channel (Workspace)]` fallback when the domain cannot be resolved. Federation outbound messages resolve raw `<#C…>` references to archive links on the sender; the receiver rewrites archive links and any remaining channel tokens to **native `<#C>`** when that channel is in the same sync on the local workspace. +- Same-instance sync resolves channel references **per target workspace**: if the mentioned channel is part of the same sync as the destination channel, the message uses the local channel ID instead of an archive URL. 
+- Federation **inbound** `message` and `message/edit` handlers resolve `@` mentions on the receiving instance using `UserMapping` / `UserDirectory` (native `<@U>` when mapped, otherwise a code-formatted `[@Name (Workspace)]` fallback). - `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID. Reset Database requires both `PRIMARY_WORKSPACE` to match the current workspace and `ENABLE_DB_RESET` to be truthy. ### Added diff --git a/docs/API_REFERENCE.md b/docs/API_REFERENCE.md index 58a0221..4d94594 100644 --- a/docs/API_REFERENCE.md +++ b/docs/API_REFERENCE.md @@ -10,8 +10,8 @@ All endpoints are served by a single Lambda function. Slack sends requests to th | `GET` | `/slack/install` | OAuth install page — redirects the user to Slack's authorization screen | | `GET` | `/slack/oauth_redirect` | OAuth callback — Slack redirects here after the user approves the app | | `POST` | `/api/federation/pair` | Accept an incoming external connection request | -| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance | -| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance | +| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance; resolves `@` mentions and `#` channel references locally before posting | +| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance; applies the same local mention and channel resolution before updating | | `POST` | `/api/federation/message/delete` | Receive a message deletion from a connected instance | | `POST` | `/api/federation/message/react` | Receive a reaction from a connected instance | | `POST` | `/api/federation/users` | Exchange user directory with a connected instance | diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index e263cbb..a395926 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -47,7 +47,7 @@ sequenceDiagram loop For each target 
channel L->>L: Re-map @mentions (cached user matching) - L->>L: Resolve #channel references (archive URLs) + L->>L: Resolve #channel refs (native if synced, else archive URL) L->>SB: chat.postMessage (as sender) SB-->>L: ts (timestamp) L->>DB: Save PostMeta record @@ -60,6 +60,8 @@ sequenceDiagram The same pattern applies to edits (`chat.update`), deletes (`chat.delete`), thread replies (with `thread_ts`), and reactions (threaded reply with emoji attribution). +For **federation**, the receiving instance resolves `@` mentions and `#` channel references locally before `chat.postMessage` / `chat.update`: mapped users become native `<@U>` tags, channels that are part of the same sync become native `<#C>` tags, and other channels keep the archive links sent by the origin instance. + ## AWS Infrastructure How to deploy or update this stack (guided script, `sam`, GitHub Actions) is documented in **[DEPLOYMENT.md](DEPLOYMENT.md)**. The diagram below reflects the **reference** SAM template (`infra/aws/template.yaml`). 
diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md index 71077da..1bc266d 100644 --- a/docs/USER_GUIDE.md +++ b/docs/USER_GUIDE.md @@ -17,6 +17,7 @@ This guide is for **workspace admins and end users** configuring SyncBot in Slac - Only workspace **admins and owners** can configure syncs (set `REQUIRE_ADMIN=false` to allow all users) - Messages, threads, edits, deletes, reactions, images, videos, and GIFs are all synced +- **@mentions and #channel links** in synced messages are rewritten per target workspace: mapped users are tagged with the local Slack user, and channels that are part of the same sync are shown as native local channel links; otherwise users fall back to a code-style label and channels use a link back to the source workspace (or a code-style label if that cannot be built) - Messages from other bots are synced; only SyncBot's own messages are filtered to prevent loops - Existing messages are not back-filled; syncing starts from the moment a channel is linked - Do not add SyncBot manually to channels. SyncBot adds itself when you configure a Sync. If it detects it was added to an unconfigured channel it will post a message and leave automatically @@ -41,7 +42,7 @@ If a workspace uninstalls SyncBot, group memberships and syncs are paused (not d ## User Mapping -Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name. +Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name. In synced messages, a mapped user is mentioned with a normal `@` tag in the receiving workspace; unmapped users appear as a code-style `[@Name (Workspace)]` label. 
Channel names that point at another synced channel in the same sync group are shown as native `#channel` links in each workspace. ## Refresh Behavior @@ -64,7 +65,7 @@ Images and videos are downloaded from the source and uploaded directly to each t *(Opt-in — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable)* -Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. +Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. The receiving SyncBot instance rewrites `@` mentions and `#` channel links using the same rules as same-instance sync (native tags when mapped / synced, fallbacks otherwise). **Data Migration** in the same section lets you export your workspace data (syncs, channels, post meta, user directory, user mappings) for moving to another instance, or import a migration file after connecting. See [Backup and Migration](BACKUP_AND_MIGRATION.md) for details. 
diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py index 738a714..91fd017 100644 --- a/syncbot/federation/api.py +++ b/syncbot/federation/api.py @@ -83,6 +83,55 @@ def _validate_fields(body: dict, required: list[str], extras: list[str] | None = return None +def _pick_user_mapping_for_federated_target(source_user_id: str, target_workspace_id: int) -> schemas.UserMapping | None: + maps = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.target_workspace_id == target_workspace_id, + schemas.UserMapping.source_user_id == source_user_id, + ], + ) + if not maps: + return None + for m in maps: + if m.target_user_id: + return m + return maps[0] + + +def _resolve_mentions_for_federated(msg_text: str, target_workspace_id: int, remote_workspace_label: str) -> str: + """Replace ``<@U_REMOTE>`` with native local mentions using *UserMapping* / *UserDirectory* on this instance.""" + if not msg_text: + return msg_text + + user_ids = re.findall(r"<@(\w+)>", msg_text) + if not user_ids: + return msg_text + + for uid in dict.fromkeys(user_ids): + mapping = _pick_user_mapping_for_federated_target(uid, target_workspace_id) + if mapping and mapping.target_user_id: + rep = f"<@{mapping.target_user_id}>" + elif mapping and mapping.source_display_name: + rep = f"`[@{mapping.source_display_name} ({remote_workspace_label})]`" + else: + display: str | None = None + for entry in DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.slack_user_id == uid, schemas.UserDirectory.deleted_at.is_(None)], + ): + display = entry.display_name or entry.real_name + if display: + break + if display: + rep = f"`[@{display} ({remote_workspace_label})]`" + else: + rep = f"`[@{uid} ({remote_workspace_label})]`" + msg_text = re.sub(rf"<@{re.escape(uid)}>", rep, msg_text) + + return msg_text + + # --------------------------------------------------------------------------- # Authentication helpers # 
--------------------------------------------------------------------------- @@ -352,6 +401,10 @@ def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, user_avatar = user.get("avatar_url") workspace_name = user.get("workspace_name", "Remote") + text = _resolve_mentions_for_federated(text, workspace.id, workspace_name) + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + text = helpers.resolve_channel_references(text, ws_client, None, target_workspace_id=workspace.id) + try: thread_ts = None if thread_post_id: @@ -429,10 +482,14 @@ def handle_message_edit(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple return _NOT_FOUND sync_channel, workspace = resolved + remote_label = fed_ws.primary_workspace_name or fed_ws.name or "Remote" + text = _resolve_mentions_for_federated(text, workspace.id, remote_label) + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + text = helpers.resolve_channel_references(text, ws_client, None, target_workspace_id=workspace.id) + post_records = _find_post_records(post_id, sync_channel.id) updated = 0 - ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) for post_meta in post_records: try: ws_client.chat_update(channel=channel_id, ts=str(post_meta.ts), text=text) diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py index 887e91d..00aca22 100644 --- a/syncbot/handlers/messages.py +++ b/syncbot/handlers/messages.py @@ -246,7 +246,9 @@ def _handle_new_post( target_workspace_id=workspace.id, ) source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None - adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws) + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message( user_id or "", @@ -403,7 +405,9 @@ 
def _handle_thread_reply( target_workspace_id=workspace.id, ) source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None - adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws) + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) parent_ts = f"{post_meta.ts:.6f}" target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message( @@ -542,7 +546,9 @@ def _handle_message_edit( target_workspace_id=workspace.id, ) source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None - adapted_text = helpers.resolve_channel_references(adapted_text, client, source_ws) + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) helpers.post_message( bot_token=bot_token, channel_id=sync_channel.channel_id, diff --git a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py index 2619f7d..8d7e131 100644 --- a/syncbot/helpers/__init__.py +++ b/syncbot/helpers/__init__.py @@ -64,6 +64,7 @@ _refresh_user_directory, _upsert_single_user_to_directory, apply_mentioned_users, + find_synced_channel_in_target, get_display_name_and_icon_for_synced_message, get_mapped_target_user_id, normalize_display_name, @@ -115,6 +116,7 @@ "get_group_members", "get_groups_for_workspace", "get_display_name_and_icon_for_synced_message", + "find_synced_channel_in_target", "get_mapped_target_user_id", "get_oauth_flow", "normalize_display_name", diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py index 06dd133..e6661a6 100644 --- a/syncbot/helpers/user_matching.py +++ b/syncbot/helpers/user_matching.py @@ -489,6 +489,53 @@ def apply_mentioned_users( return re.sub(r"<@\w+>", lambda _: next(replace_iter), msg_text) +def find_synced_channel_in_target(source_channel_id: str, target_workspace_id: int) -> str | None: + """If 
*source_channel_id* belongs to an active sync that *target_workspace_id* also has a channel in, return the local channel ID.""" + source_rows = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == source_channel_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + if not source_rows: + return None + sync_id = source_rows[0].sync_id + target_rows = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_id, + schemas.SyncChannel.workspace_id == target_workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + if not target_rows: + return None + return target_rows[0].channel_id + + +_ARCHIVE_LINK_PATTERN = re.compile( + r"<(https://[^|>]+/archives/(C[A-Z0-9]+))\|([^>]+)>" +) + + +def _rewrite_slack_archive_links_to_native_channels(msg_text: str, target_workspace_id: int) -> str: + """Replace Slack archive mrkdwn links with native ``<#C_LOCAL>`` when that channel is synced to *target_workspace_id*.""" + if not msg_text or not target_workspace_id: + return msg_text + + def repl(m: re.Match) -> str: + ch_id = m.group(2) + local = find_synced_channel_in_target(ch_id, target_workspace_id) + if local: + return f"<#{local}>" + return m.group(0) + + return _ARCHIVE_LINK_PATTERN.sub(repl, msg_text) + + + def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: + """Return the workspace subdomain (e.g.
``acme`` for ``acme.slack.com``) from ``team.info``, cached.""" cache_key = f"ws_domain:{team_id}" @@ -509,37 +556,63 @@ def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: def resolve_channel_references( msg_text: str, - source_client: WebClient, + source_client: WebClient | None, source_workspace: "schemas.Workspace | None" = None, + target_workspace_id: int | None = None, ) -> str: - """Replace ``<#CHANNEL_ID>`` references with deep-linked channel names.""" + """Replace ``<#CHANNEL_ID>`` references with native local channels when synced, else archive URLs or fallbacks. + + When *target_workspace_id* is set, Slack archive links from federated senders may be rewritten to + ``<#C_LOCAL>`` if that source channel is synced to the target workspace. + """ if not msg_text: return msg_text - channel_pattern = re.compile(r"<#(C[A-Z0-9]+)(?:\|[^>]*)?>") - matches = channel_pattern.findall(msg_text) - if not matches: + if target_workspace_id: + msg_text = _rewrite_slack_archive_links_to_native_channels(msg_text, target_workspace_id) + + channel_pattern = re.compile(r"<#(C[A-Z0-9]+)(?:\|([^>]*))?>") + pair_tuples = channel_pattern.findall(msg_text) + if not pair_tuples: return msg_text + by_channel_id: dict[str, str | None] = {} + for cid, pipe in pair_tuples: + if cid not in by_channel_id: + by_channel_id[cid] = pipe.strip() if pipe and pipe.strip() else None + team_id = getattr(source_workspace, "team_id", None) if source_workspace else None ws_name = resolve_workspace_name(source_workspace) if source_workspace else None - for ch_id in set(matches): + for ch_id, inline_label in by_channel_id.items(): + if target_workspace_id: + local_ch = find_synced_channel_in_target(ch_id, target_workspace_id) + if local_ch: + replacement = f"<#{local_ch}>" + msg_text = channel_pattern.sub( + lambda m, _cid=ch_id, _rep=replacement: _rep if m.group(1) == _cid else m.group(0), + msg_text, + ) + continue + ch_name = ch_id - try: - info = 
source_client.conversations_info(channel=ch_id) - ch_name = safe_get(info, "channel", "name") or ch_id - except Exception as exc: - # If we cannot resolve channel metadata, keep the raw channel ID. - # This preserves message content without blocking sync processing. - _logger.debug( - "resolve_channel_reference_failed", - extra={"channel_id": ch_id, "error": str(exc)}, - ) + if source_client: + try: + info = source_client.conversations_info(channel=ch_id) + ch_name = safe_get(info, "channel", "name") or ch_id + except Exception as exc: + _logger.debug( + "resolve_channel_reference_failed", + extra={"channel_id": ch_id, "error": str(exc)}, + ) + if inline_label: + ch_name = inline_label + elif inline_label: + ch_name = inline_label if ch_name != ch_id: label = f"#{ch_name} ({ws_name})" if ws_name else f"#{ch_name}" - domain = _get_workspace_domain(source_client, team_id) if team_id else None + domain = _get_workspace_domain(source_client, team_id) if source_client and team_id else None if domain: deep_link = f"https://{domain}.slack.com/archives/{ch_id}" replacement = f"<{deep_link}|{label}>" diff --git a/tests/test_federation_inbound_resolve.py b/tests/test_federation_inbound_resolve.py new file mode 100644 index 0000000..414c185 --- /dev/null +++ b/tests/test_federation_inbound_resolve.py @@ -0,0 +1,76 @@ +"""Tests for federation inbound text resolution (mentions and channels).""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from db import schemas +from federation import api as federation_api + + +class TestResolveMentionsForFederated: + def test_maps_via_user_mapping_target(self): + m = MagicMock() + m.target_user_id = "ULOCAL" + m.source_display_name = "Alice" + + def fake_find(model, 
_filters): + if model == schemas.UserMapping: + return [m] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("hi <@UREMOTE>", 10, "Partner WS") + assert out == "hi <@ULOCAL>" + + def test_fallback_stub_mapping_display_name(self): + m = MagicMock() + m.target_user_id = None + m.source_display_name = "Bob" + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [m] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("hi <@UREMOTE>", 10, "Partner WS") + assert out == "hi `[@Bob (Partner WS)]`" + + def test_fallback_user_directory_display_name(self): + entry = MagicMock() + entry.display_name = "Carol" + entry.real_name = None + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [] + if model == schemas.UserDirectory: + return [entry] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("hey <@UX>", 10, "Remote") + assert out == "hey `[@Carol (Remote)]`" + + def test_prefers_mapping_with_target_user_id(self): + good = MagicMock() + good.target_user_id = "UBEST" + good.source_display_name = "Best" + stale = MagicMock() + stale.target_user_id = None + stale.source_display_name = "Stale" + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [stale, good] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("<@U1>", 10, "R") + assert out == "<@UBEST>" diff --git a/tests/test_helpers.py b/tests/test_helpers.py index 8bea143..b81b937 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -338,3 +338,31 @@ def test_no_app_redirect_in_output(self): ws = 
self._make_workspace(team_id="T123", name="Acme") result = helpers.resolve_channel_references("see <#CABC123>", client, ws) assert "app_redirect" not in result + + @patch("helpers.user_matching.find_synced_channel_in_target") + def test_native_channel_when_synced_to_target(self, mock_find): + mock_find.return_value = "C_LOCAL_TARGET" + client = self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references( + "see <#CSOURCE123>", client, ws, target_workspace_id=42 + ) + assert result == "see <#C_LOCAL_TARGET>" + mock_find.assert_called_with("CSOURCE123", 42) + assert "slack.com" not in result + + @patch("helpers.user_matching.find_synced_channel_in_target") + def test_archive_mrkdwn_rewritten_to_native_when_synced(self, mock_find): + mock_find.return_value = "C_LOCAL" + client = MagicMock() + text = "see <https://acme.slack.com/archives/CSRC123|#general (Acme)>" + result = helpers.resolve_channel_references(text, client, None, target_workspace_id=1) + assert result == "see <#C_LOCAL>" + + @patch("helpers.user_matching.find_synced_channel_in_target") + def test_archive_mrkdwn_unchanged_when_not_synced(self, mock_find): + mock_find.return_value = None + client = MagicMock() + text = "see <https://acme.slack.com/archives/CSRC123|#general (Acme)>" + result = helpers.resolve_channel_references(text, client, None, target_workspace_id=1) + assert result == text From 0c0b2e3999ae5e740979eb447daca4828fa44da6 Mon Sep 17 00:00:00 2001 From: Klint Van Tassel Date: Thu, 26 Mar 2026 22:36:28 -0500 Subject: [PATCH 45/45] Update to 1.0.1 --- CHANGELOG.md | 9 ++++----- pyproject.toml | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4891aeb..0068f3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,14 +5,13 @@ All notable changes to this project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased] +## [1.0.1] - 2026-03-26 ### Changed -- Cross-workspace channel links in synced messages use workspace archive URLs (`https://{domain}.slack.com/archives/{id}`) instead of `slack.com/app_redirect`, with a code-formatted `[#channel (Workspace)]` fallback when the domain cannot be resolved. Federation outbound messages resolve raw `<#C…>` references to archive links on the sender; the receiver rewrites archive links and any remaining channel tokens to **native `<#C>`** when that channel is in the same sync on the local workspace. -- Same-instance sync resolves channel references **per target workspace**: if the mentioned channel is part of the same sync as the destination channel, the message uses the local channel ID instead of an archive URL. -- Federation **inbound** `message` and `message/edit` handlers resolve `@` mentions on the receiving instance using `UserMapping` / `UserDirectory` (native `<@U>` when mapped, otherwise a code-formatted `[@Name (Workspace)]` fallback). -- `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID. Reset Database requires both `PRIMARY_WORKSPACE` to match the current workspace and `ENABLE_DB_RESET` to be truthy. +- Cross-workspace `#channel` links resolve to native local channels when the channel is part of the same sync; otherwise use workspace archive URLs with a code-formatted fallback +- `@mentions` and `#channel` links in federated messages are now resolved on the receiving instance (native tags when mapped/synced, fallbacks otherwise) +- `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID; requires `PRIMARY_WORKSPACE` to match ### Added diff --git a/pyproject.toml b/pyproject.toml index 83ff438..aef914d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "syncbot" -version = "1.0.0" +version = "1.0.1" description = "Sync chat threads between Slack Workspaces." 
authors = ["Evan Petzoldt ", "Klint Van Tassel "] readme = "README.md"