diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..583c158 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,42 @@ +{ + "name": "SyncBot", + "dockerComposeFile": "docker-compose.dev.yml", + "service": "app", + "workspaceFolder": "/app", + + "features": { + "ghcr.io/devcontainers/features/aws-cli:1": {} + }, + + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "charliermarsh.ruff" + ], + "settings": { + "python.defaultInterpreterPath": "/usr/local/bin/python", + "python.testing.pytestEnabled": true, + "python.testing.pytestArgs": ["tests"], + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.formatOnSave": true + } + } + } + }, + + "forwardPorts": [3000, 3306], + + "postCreateCommand": "pip install --no-cache-dir boto3 pytest && echo '✅ Dev container ready'", + + "remoteEnv": { + "PYTHONPATH": "/app/syncbot", + "LOCAL_DEVELOPMENT": "true", + "DATABASE_HOST": "db", + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "rootpass", + "DATABASE_SCHEMA": "syncbot" + } +} diff --git a/.devcontainer/docker-compose.dev.yml b/.devcontainer/docker-compose.dev.yml new file mode 100644 index 0000000..ba23142 --- /dev/null +++ b/.devcontainer/docker-compose.dev.yml @@ -0,0 +1,45 @@ +services: + db: + image: mysql:8 + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_DATABASE: syncbot + MYSQL_ROOT_HOST: "%" + ports: + - "3306:3306" + volumes: + - syncbot-db:/var/lib/mysql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] + interval: 5s + timeout: 5s + retries: 10 + + app: + build: + context: .. 
+ dockerfile: Dockerfile + command: sleep infinity + depends_on: + db: + condition: service_healthy + env_file: + - ../.env + environment: + # Overrides that are always fixed for local dev + LOCAL_DEVELOPMENT: "true" + DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} + DATABASE_URL: ${DATABASE_URL:-} + DATABASE_HOST: ${DATABASE_HOST:-db} + DATABASE_USER: ${DATABASE_USER:-root} + DATABASE_PASSWORD: ${DATABASE_PASSWORD:-rootpass} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:-syncbot} + DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} + DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} + volumes: + - ..:/app:cached + ports: + - "3000:3000" + +volumes: + syncbot-db: diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..026f186 --- /dev/null +++ b/.env.example @@ -0,0 +1,99 @@ +# ============================================================================= +# SyncBot Environment Variables +# ============================================================================= +# Copy this file to .env and fill in your values: +# cp .env.example .env +# +# Docker Compose and Dev Containers read .env automatically. 
+# For native Python development, source it: source .env or export $(cat .env | xargs) + +# ----------------------------------------------------------------------------- +# Database (mysql, postgresql, or sqlite) +# ----------------------------------------------------------------------------- +# Option A — MySQL (default): legacy vars or DATABASE_URL +DATABASE_BACKEND=mysql +DATABASE_HOST=127.0.0.1 +# DATABASE_PORT=3306 +DATABASE_USER=root +DATABASE_PASSWORD=rootpass +DATABASE_SCHEMA=syncbot +# Optional TLS (provider-dependent) +# DATABASE_TLS_ENABLED=true +# DATABASE_SSL_CA_PATH=/etc/pki/tls/certs/ca-bundle.crt + +# Option B — PostgreSQL: set backend and PostgreSQL vars or DATABASE_URL +# DATABASE_BACKEND=postgresql +# DATABASE_HOST=127.0.0.1 +# DATABASE_PORT=5432 +# DATABASE_USER=postgres +# DATABASE_PASSWORD=postgres +# DATABASE_SCHEMA=syncbot + +# Option C — SQLite (forks / local): set backend and URL only +# DATABASE_BACKEND=sqlite +# DATABASE_URL=sqlite:///syncbot.db + +# Slack Team ID of the primary workspace. Required for backup/restore to appear. +# DB reset (when enabled below) is also scoped to this workspace. +# PRIMARY_WORKSPACE=T0123456789 + +# When true (and PRIMARY_WORKSPACE matches), show "Reset Database" on the Home tab. +# ENABLE_DB_RESET=true + +# ----------------------------------------------------------------------------- +# Local Development Mode +# ----------------------------------------------------------------------------- +# This lets you run the app without all the Slack credentials. +# LOCAL_DEVELOPMENT=true + +# ----------------------------------------------------------------------------- +# Slack +# In cloud deploys these are usually injected by your provider's secret system +# (AWS/GCP/Azure). Uncomment if running locally with OAuth flow. 
+# ----------------------------------------------------------------------------- +# SLACK_BOT_TOKEN=xoxb-your-bot-token +# SLACK_SIGNING_SECRET=your-signing-secret +# SLACK_CLIENT_ID=your-client-id +# SLACK_CLIENT_SECRET=your-client-secret +# SLACK_BOT_SCOPES — bot OAuth scopes; must match slack-manifest.json oauth_config.scopes.bot (see syncbot/slack_manifest_scopes.py). +# SLACK_BOT_SCOPES=app_mentions:read,channels:history,channels:join,channels:read,channels:manage,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# SLACK_USER_SCOPES — user OAuth scopes; must match oauth_config.scopes.user and USER_SCOPES in slack_manifest_scopes.py. +# SLACK_USER_SCOPES=chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email +# OAuth state and installation data are stored in the same database (PostgreSQL, MySQL, or SQLite). + +# ----------------------------------------------------------------------------- +# Encryption (optional) +# ----------------------------------------------------------------------------- +# Passphrase for Fernet bot-token encryption at rest. +# Use any value except "123" to enable encryption. +# TOKEN_ENCRYPTION_KEY=my-secret-passphrase + +# ----------------------------------------------------------------------------- +# Admin Authorization (optional) +# ----------------------------------------------------------------------------- +# Set to "false" to allow all users to configure syncs (default: true). +# REQUIRE_ADMIN=true + +# ----------------------------------------------------------------------------- +# Logging (optional) +# ----------------------------------------------------------------------------- +# Log output level: DEBUG, INFO, WARNING, ERROR, or CRITICAL (default: INFO). 
+# LOG_LEVEL=INFO + +# ----------------------------------------------------------------------------- +# Soft-Delete Retention (optional) +# ----------------------------------------------------------------------------- +# Number of days to keep soft-deleted workspace data before permanent purge. +# When a workspace uninstalls the app, its group memberships and syncs are paused. +# If it reinstalls within this window, everything is restored automatically. +# SOFT_DELETE_RETENTION_DAYS=30 + +# ----------------------------------------------------------------------------- +# External Connections (optional, disabled by default) +# ----------------------------------------------------------------------------- +# Set SYNCBOT_FEDERATION_ENABLED=true to activate external connections. +# SYNCBOT_INSTANCE_ID is a unique UUID for this instance (auto-generated if not set). +# SYNCBOT_PUBLIC_URL is the publicly reachable base URL (required for external connections). +# SYNCBOT_FEDERATION_ENABLED=false +# SYNCBOT_INSTANCE_ID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +# SYNCBOT_PUBLIC_URL=https://your-syncbot.example.com diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..431ca64 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,25 @@ +--- +name: Bug report +about: Report something that is not working as expected +labels: bug +--- + +## What happened + + + +## Steps to reproduce + +1. +2. +3. 
+ +## Expected behavior + + + +## Environment + +- Cloud / deploy: +- Database: +- Browser (if UI-related): diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..07e6dc0 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest an idea or improvement +labels: enhancement +--- + +## Problem or use case + + + +## Proposed solution + + + +## Alternatives (optional) + + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..e644f37 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,28 @@ +version: 2 +updates: + - package-ecosystem: "pip" + directory: "/syncbot" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + groups: + minor-and-patch: + update-types: ["minor", "patch"] + + - package-ecosystem: "pip" + directory: "/infra/aws/db_setup" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 + + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + open-pull-requests-limit: 10 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..994ec77 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,13 @@ +## Summary + + + +## How to test + + + +## Checklist + +- [ ] CI passes (requirements sync, SAM lint, tests) +- [ ] Docs updated if behavior or deploy steps changed +- [ ] No new cloud-provider-specific code under `syncbot/` (keep infra in `infra/` and workflows) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..9824f5a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,77 @@ +# PR / branch checks without cloud credentials. Deploy workflows stay in deploy-*.yml. 
+name: CI + +on: + pull_request: + push: + branches: [main, test, prod] + +concurrency: + group: ci-${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + requirements-sync: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref || github.ref_name }} + repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }} + fetch-depth: 0 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install Poetry and export plugin + run: | + python -m pip install --upgrade pip + pip install poetry + poetry self add poetry-plugin-export + - name: Sync requirements.txt with poetry.lock + env: + PR_HEAD_REPO: ${{ github.event.pull_request.head.repo.full_name }} + run: | + poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt + if git diff --quiet syncbot/requirements.txt; then + echo "requirements.txt is already in sync." + elif [[ -n "${PR_HEAD_REPO}" && "${PR_HEAD_REPO}" != "${GITHUB_REPOSITORY}" ]]; then + echo "::error::syncbot/requirements.txt is out of sync with poetry.lock. From the repo root run: poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt" + exit 1 + else + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add syncbot/requirements.txt + git commit -m "chore: sync requirements.txt with poetry.lock" + git push + echo "::notice::requirements.txt was out of sync and has been auto-fixed." 
+ fi + + sam-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + - name: sam validate --lint + run: | + sam validate -t infra/aws/template.yaml --lint + sam validate -t infra/aws/template.bootstrap.yaml --lint + + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install poetry + poetry install --with dev + # Infra + deploy-script smoke tests (fast). Use `poetry run pytest` locally for the full suite. + - name: pytest (infra & deploy scripts) + run: poetry run pytest -q tests/test_deploy_script_syntax.py infra/aws/tests infra/gcp/tests diff --git a/.github/workflows/deploy-aws.yml b/.github/workflows/deploy-aws.yml new file mode 100644 index 0000000..fab46d5 --- /dev/null +++ b/.github/workflows/deploy-aws.yml @@ -0,0 +1,180 @@ +# Deploy SyncBot to AWS (SAM). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. +# To use GCP instead: set repository variable DEPLOY_TARGET=gcp and disable this workflow. +# +# Token key policy: Non-local deploys require a secure TOKEN_ENCRYPTION_KEY. The AWS app stack +# auto-generates it in Secrets Manager by default. Back up the generated key after first deploy. +# Optional disaster recovery secret TOKEN_ENCRYPTION_KEY_OVERRIDE passes TokenEncryptionKeyOverride for restore. 
+ +name: Deploy (AWS) + +on: + push: + branches: + - test + - prod + +permissions: + id-token: write + contents: read + +# Skip when using GCP (set DEPLOY_TARGET=gcp to use deploy-gcp.yml instead) +jobs: + sam-build: + if: vars.DEPLOY_TARGET != 'gcp' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + + - name: Validate SAM templates (cfn-lint) + run: | + sam validate -t infra/aws/template.yaml --lint + sam validate -t infra/aws/template.bootstrap.yaml --lint + + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} + + - run: sam build -t infra/aws/template.yaml + + - name: Security audit Python dependencies + run: | + python -m pip install --upgrade pip pip-audit + pip-audit -r syncbot/requirements.txt + pip-audit -r infra/aws/db_setup/requirements.txt + + - name: Publish artifact + uses: actions/upload-artifact@v4 + with: + name: build-artifact + path: './.aws-sam/build' + + sam-deploy-test: + if: github.ref == 'refs/heads/test' + runs-on: ubuntu-latest + environment: test + needs: sam-build + steps: + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} + + - name: Download artifact + uses: actions/download-artifact@v4 + with: + name: build-artifact + path: './.aws-sam/build' + + - uses: aws-actions/setup-sam@v2 + with: + use-installer: true + + - name: Deploy to test + run: | + TOKEN_KEY_OVERRIDE="${{ secrets.TOKEN_ENCRYPTION_KEY_OVERRIDE }}" + OVERRIDE_PARAM="" + if [ -n "$TOKEN_KEY_OVERRIDE" ]; then + OVERRIDE_PARAM=" TokenEncryptionKeyOverride=$TOKEN_KEY_OVERRIDE" + echo "Using TOKEN_ENCRYPTION_KEY_OVERRIDE for disaster-recovery deploy." 
+ fi + sam deploy \ + -t .aws-sam/build/template.yaml \ + --no-confirm-changeset \ + --no-fail-on-empty-changeset \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ + --capabilities CAPABILITY_IAM \ + --region ${{ vars.AWS_REGION }} \ + --no-disable-rollback \ + --force-upload \ + --parameter-overrides \ + "Stage=${{ vars.STAGE_NAME }} \ + DatabaseEngine=${{ vars.DATABASE_ENGINE || 'mysql' }} \ + ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ + ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ + ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ + ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ + ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ + ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ + DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ + RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ + SoftDeleteRetentionDays=${{ vars.SOFT_DELETE_RETENTION_DAYS || '30' }} \ + SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ + SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ + SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + PrimaryWorkspace=${{ vars.PRIMARY_WORKSPACE }} \ + EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ + SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ + SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ + SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ + $OVERRIDE_PARAM" + + sam-deploy-prod: + if: github.ref == 'refs/heads/prod' + runs-on: ubuntu-latest + environment: prod + needs: sam-build + steps: + - uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ vars.AWS_ROLE_TO_ASSUME }} + aws-region: ${{ vars.AWS_REGION }} + + - name: Download artifact + uses: actions/download-artifact@v4 + with: + name: build-artifact + path: './.aws-sam/build' + + 
- uses: aws-actions/setup-sam@v2 + with: + use-installer: true + + - name: Deploy to prod + run: | + TOKEN_KEY_OVERRIDE="${{ secrets.TOKEN_ENCRYPTION_KEY_OVERRIDE }}" + OVERRIDE_PARAM="" + if [ -n "$TOKEN_KEY_OVERRIDE" ]; then + OVERRIDE_PARAM=" TokenEncryptionKeyOverride=$TOKEN_KEY_OVERRIDE" + echo "Using TOKEN_ENCRYPTION_KEY_OVERRIDE for disaster-recovery deploy." + fi + sam deploy \ + -t .aws-sam/build/template.yaml \ + --no-confirm-changeset \ + --no-fail-on-empty-changeset \ + --stack-name ${{ vars.AWS_STACK_NAME }} \ + --s3-bucket ${{ vars.AWS_S3_BUCKET }} \ + --capabilities CAPABILITY_IAM \ + --region ${{ vars.AWS_REGION }} \ + --no-disable-rollback \ + --force-upload \ + --parameter-overrides \ + "Stage=${{ vars.STAGE_NAME }} \ + DatabaseEngine=${{ vars.DATABASE_ENGINE || 'mysql' }} \ + ExistingDatabaseHost=${{ vars.EXISTING_DATABASE_HOST }} \ + ExistingDatabaseAdminUser=${{ vars.EXISTING_DATABASE_ADMIN_USER }} \ + ExistingDatabaseAdminPassword=${{ secrets.EXISTING_DATABASE_ADMIN_PASSWORD }} \ + ExistingDatabaseNetworkMode=${{ vars.EXISTING_DATABASE_NETWORK_MODE || 'public' }} \ + ExistingDatabaseSubnetIdsCsv=${{ vars.EXISTING_DATABASE_SUBNET_IDS_CSV }} \ + ExistingDatabaseLambdaSecurityGroupId=${{ vars.EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID }} \ + DatabaseSchema=${{ vars.DATABASE_SCHEMA }} \ + LogLevel=${{ vars.LOG_LEVEL || 'INFO' }} \ + RequireAdmin=${{ vars.REQUIRE_ADMIN || 'true' }} \ + SoftDeleteRetentionDays=${{ vars.SOFT_DELETE_RETENTION_DAYS || '30' }} \ + SyncbotFederationEnabled=${{ vars.SYNCBOT_FEDERATION_ENABLED || 'false' }} \ + SyncbotInstanceId=${{ vars.SYNCBOT_INSTANCE_ID }} \ + SyncbotPublicUrl=${{ vars.SYNCBOT_PUBLIC_URL }} \ + PrimaryWorkspace=${{ vars.PRIMARY_WORKSPACE }} \ + EnableDbReset=${{ vars.ENABLE_DB_RESET }} \ + SlackClientID=${{ vars.SLACK_CLIENT_ID }} \ + SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} \ + SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} \ + $OVERRIDE_PARAM" diff --git 
a/.github/workflows/deploy-gcp.yml b/.github/workflows/deploy-gcp.yml new file mode 100644 index 0000000..0415265 --- /dev/null +++ b/.github/workflows/deploy-gcp.yml @@ -0,0 +1,76 @@ +# Deploy SyncBot to GCP (Cloud Run). See docs/DEPLOYMENT.md and docs/INFRA_CONTRACT.md. +# To use AWS instead, disable this workflow and use deploy-aws.yml. +# +# Setup: +# 1. Run infra/gcp Terraform and configure Workload Identity Federation for GitHub. +# 2. Set GitHub vars: GCP_PROJECT_ID, GCP_REGION, GCP_WORKLOAD_IDENTITY_PROVIDER, GCP_SERVICE_ACCOUNT. +# 3. Set GitHub secrets for Slack/DB/encryption as needed for your CI (or use Secret Manager only). +# 4. This workflow intentionally fails until real build/deploy steps are configured. + +name: Deploy (GCP) + +on: + push: + branches: + - test + - prod + +permissions: + id-token: write + contents: read + +# Run only when DEPLOY_TARGET is set to 'gcp' (disable deploy-aws.yml if using GCP only) +jobs: + build-and-push: + if: vars.DEPLOY_TARGET == 'gcp' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + # Workload Identity Federation: authenticate without a key file + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + + # - uses: google-github-actions/setup-gcloud@v2 + + # Build and push container image to Artifact Registry + # - run: | + # gcloud builds submit --tag "${{ vars.GCP_REGION }}-docker.pkg.dev/${{ vars.GCP_PROJECT_ID }}/syncbot-TEST-images/syncbot:${{ github.sha }}" . + + - name: Fail until GCP CI is configured + run: | + echo "GCP CI deploy is not configured in this repository." + echo "Implement WIF auth, image build/push, and deploy steps in deploy-gcp.yml before enabling DEPLOY_TARGET=gcp." 
+ exit 1 + + deploy-test: + if: github.ref == 'refs/heads/test' + runs-on: ubuntu-latest + needs: build-and-push + # environment: test + steps: + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + # - run: gcloud run deploy syncbot-test --image=... --region=... + - run: | + echo "deploy-test is blocked because GCP CI deploy is not configured." + exit 1 + + deploy-prod: + if: github.ref == 'refs/heads/prod' + runs-on: ubuntu-latest + needs: build-and-push + # environment: prod + steps: + # - uses: google-github-actions/auth@v2 + # with: + # workload_identity_provider: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER }} + # service_account: ${{ vars.GCP_SERVICE_ACCOUNT }} + # - run: gcloud run deploy syncbot-prod --image=... --region=... + - run: | + echo "deploy-prod is blocked because GCP CI deploy is not configured." + exit 1 diff --git a/.github/workflows/sam-pipeline.yml b/.github/workflows/sam-pipeline.yml deleted file mode 100644 index b715108..0000000 --- a/.github/workflows/sam-pipeline.yml +++ /dev/null @@ -1,71 +0,0 @@ -on: - push: - branches: - - main - -jobs: - sam-build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - uses: aws-actions/setup-sam@v2 - with: - use-installer: true - - uses: aws-actions/configure-aws-credentials@v3 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 - - # sam build - - run: sam build --use-container - # Run Unit tests- Specify unit tests here - - # Publish artifact (need to publish app?) 
- - name: Publish artifact - uses: actions/upload-artifact@v3 - with: - name: build-artifact - path: './.aws-sam/build' - - sam-deploy-test: - runs-on: "ubuntu-latest" - environment: "test" - needs: sam-build - steps: - - uses: aws-actions/configure-aws-credentials@v3 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 - - - name: Download artifact - uses: actions/download-artifact@v3 - with: - name: build-artifact - path: './.aws-sam/build' - - - run: sam deploy -t .aws-sam/build/template.yaml --no-confirm-changeset --no-fail-on-empty-changeset --stack-name ${{ vars.AWS_STACK_NAME }} --s3-bucket ${{ vars.AWS_S3_BUCKET }} --capabilities CAPABILITY_IAM --region us-east-2 --no-disable-rollback --force-upload --parameter-overrides "SlackClientSecret=${{ secrets.SLACK_CLIENT_SECRET }} SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} Stage=${{ vars.STAGE_NAME }} DatabaseHost=${{ secrets.DATABASE_HOST }} DatabasePassword=${{ secrets.ADMIN_DATABASE_PASSWORD }} PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }} StravaClientID=${{ secrets.STRAVA_CLIENT_ID }} StravaClientSecret=${{ secrets.STRAVA_CLIENT_SECRET }}" - - sam-deploy-prod: - runs-on: "ubuntu-latest" - environment: "prod" - needs: [sam-build, sam-deploy-test] - steps: - - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws-region: us-east-2 - - - name: Download artifact - uses: actions/download-artifact@v3 - with: - name: build-artifact - path: './.aws-sam/build' - - - run: sam deploy -t .aws-sam/build/template.yaml --no-confirm-changeset --no-fail-on-empty-changeset --stack-name ${{ vars.AWS_STACK_NAME }} --s3-bucket ${{ vars.AWS_S3_BUCKET }} --capabilities CAPABILITY_IAM --region us-east-2 --no-disable-rollback --force-upload --parameter-overrides "SlackClientSecret=${{ 
secrets.SLACK_CLIENT_SECRET }} SlackSigningSecret=${{ secrets.SLACK_SIGNING_SECRET }} Stage=${{ vars.STAGE_NAME }} DatabaseHost=${{ secrets.DATABASE_HOST }} DatabasePassword=${{ secrets.ADMIN_DATABASE_PASSWORD }} PasswordEncryptKey=${{ secrets.PASSWORD_ENCRYPT_KEY }} StravaClientID=${{ secrets.STRAVA_CLIENT_ID }} StravaClientSecret=${{ secrets.STRAVA_CLIENT_SECRET }}" - diff --git a/.gitignore b/.gitignore index ff501f8..77c5db2 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,8 @@ cover/ local_settings.py db.sqlite3 db.sqlite3-journal +test_syncbot.db +test_bootstrap.db # Flask stuff: instance/ @@ -156,9 +158,19 @@ env.json .aws-sam/ data/cache/ +# Local OAuth file stores (dev only) +syncbot/.oauth-data/ + # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ + +# Cursor +.cursor/ + +# Generated deploy artifacts +slack-manifest_*.json +deploy-receipts/ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..3bf51ed --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,34 @@ +# Pre-commit hooks for SyncBot +# +# Install: pip install pre-commit && pre-commit install +# Run all: pre-commit run --all-files +# +# See https://pre-commit.com for more information. 
+ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + args: ["--maxkb=500"] + - id: check-merge-conflict + - id: detect-private-key + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.8.6 + hooks: + - id: ruff + args: [--fix, --exit-non-zero-on-fix] + - id: ruff-format + + - repo: local + hooks: + - id: sync-requirements + name: Sync requirements.txt with poetry.lock + entry: bash -c 'poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt && git add syncbot/requirements.txt' + language: system + files: ^poetry\.lock$ + pass_filenames: false diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..0068f3f --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,37 @@ +# Changelog + +All notable changes to this project are documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.0.1] - 2026-03-26 + +### Changed + +- Cross-workspace `#channel` links resolve to native local channels when the channel is part of the same sync; otherwise use workspace archive URLs with a code-formatted fallback +- `@mentions` and `#channel` links in federated messages are now resolved on the receiving instance (native tags when mapped/synced, fallbacks otherwise) +- `ENABLE_DB_RESET` is now a boolean (`true` / `1` / `yes`) instead of a Slack Team ID; requires `PRIMARY_WORKSPACE` to match + +### Added + +- `PRIMARY_WORKSPACE` env var: must be set to a Slack Team ID for backup/restore to appear. Also scopes DB reset to that workspace. 
+ +## [1.0.0] - 2026-03-25 + +### Added + +- Multi-workspace message sync: messages, threads, edits, deletes, reactions, images, videos, and GIFs +- Cross-workspace @mention resolution (email, name, and manual matching) +- Workspace Groups with invite codes (many-to-many collaboration; direct and group-wide sync modes) +- Pause, resume, and stop per-channel sync controls +- App Home tab for configuration (no slash commands) +- Cross-instance federation (optional, HMAC-authenticated) +- Backup/restore and workspace data migration +- Bot token encryption at rest (Fernet) +- AWS deployment (SAM/CloudFormation) with optional CI/CD via GitHub Actions +- GCP deployment (Terraform/Cloud Run) with interactive deploy script; GitHub Actions workflow for GCP is not yet fully wired +- Dev Container and Docker Compose for local development +- Structured JSON logging with correlation IDs and CloudWatch alarms (AWS) +- PostgreSQL, MySQL, and SQLite database backends +- Alembic-managed schema migrations applied at startup diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..426d16e --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing + +Thanks for helping improve SyncBot. + +## Branching (upstream vs downstream) + +The **upstream** repository ([F3Nation-Community/syncbot](https://github.com/F3Nation-Community/syncbot)) is the shared codebase. Each deployment maintains its own **fork**: + +| Branch | Role | +|--------|------| +| **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | +| **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)). | + +Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. + +## Workflow + +1. 
**Fork** the repository and create a branch from **`main`**. +2. Open a **pull request** targeting **`main`** on the upstream repo (or the repo you were asked to contribute to). +3. Keep application code **provider-neutral**: put cloud-specific logic only under `infra/<provider>/` and in `deploy-<provider>.yml` workflows. See [docs/INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md) (Fork Compatibility Policy). + +## Before you submit + +- Run **`pre-commit run --all-files`** (install with `pip install pre-commit && pre-commit install` if needed). +- Ensure **CI passes**: requirements export check, SAM template lint, and tests (see [.github/workflows/ci.yml](.github/workflows/ci.yml)). +- If you change dependencies in `pyproject.toml`, refresh the lockfile and `syncbot/requirements.txt` as described in [docs/DEVELOPMENT.md](docs/DEVELOPMENT.md). + +## Questions + +Use [GitHub Issues](https://github.com/F3Nation-Community/syncbot/issues) for bugs and feature ideas, or check [docs/DEPLOYMENT.md](docs/DEPLOYMENT.md) for deploy-related questions. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..dc365f2 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,26 @@ +FROM python:3.12-slim + +WORKDIR /app + +# Install system dependencies for cryptography and MySQL client bindings. +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + libffi-dev \ + default-libmysqlclient-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install runtime dependencies from pinned requirements. +COPY syncbot/requirements.txt /app/requirements.txt +RUN python -m pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /app/requirements.txt + +# Copy application code +COPY syncbot/ ./syncbot/ + +WORKDIR /app/syncbot + +# Cloud Run sets PORT (default 8080); local dev may use 3000. 
+EXPOSE 8080 + +CMD ["python", "app.py"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..be3f7b2 --- /dev/null +++ b/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. 
+The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 14936bf..c3b640e 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,66 @@ # SyncBot +SyncBot Icon -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +SyncBot is a Slack app for syncing messages across workspaces. Once configured, this app will sync messages, threads, edits, deletes, reactions, images, videos, and GIFs to every channel in a SyncBot group. -SyncBot is a Slack app that replicates ("syncs") posts and replies across Slack workspaces. Once configured, this will happen automatically in synced channels. +> **Using SyncBot in Slack?** See the [User Guide](docs/USER_GUIDE.md). -## Installation and Getting Started +--- -Set up is simple: +## Slack app setup -1. Click [this link](https://utazcizeo0.execute-api.us-east-2.amazonaws.com/Prod/slack/install) from a desktop computer. Make sure you have selected your desired workspace in the upper right! -2. Next, you can configure SyncBot by using the `/config-syncbot` slash command -3. If this is the first workspace you are configuring, use the "Create new Sync" button. Otherwise, use "Join existing Sync". +Do this before you deploy or run locally: -Some notes: - - Bot messages will not be synced, only actual user messages - - Existing messages are not synced, but going forward all posts and their thread replies will be - - Do not add SyncBot manually to channels - SyncBot will add itself to channels you configure. 
If it detects that it has been added to a non-configured channel, it will leave the channel - - Private channels are not supported +1. [api.slack.com/apps](https://api.slack.com/apps) → **Create New App** → **From an app manifest** → paste [`slack-manifest.json`](slack-manifest.json). +2. Upload [`assets/icon.png`](assets/icon.png) under **Basic Information** → **Display Information**. +3. Copy **Signing Secret**, **Client ID**, and **Client Secret** (needed for deploy). For **local dev**, install the app under **OAuth & Permissions** and copy the **Bot User OAuth Token** (`xoxb-...`). -## Feature Request and Roadmap +--- -I use GitHub Issues for tracking feature requests. Feel free to add some here: https://github.com/F3Nation-Community/syncbot/issues +## Deploy -Roadmap: - - Picture sync - - Reaction sync \ No newline at end of file +From the **repo root**, run the deploy script once for **`test`** and once for **`prod`** to automatically deploy to your infrastructure provider (currently AWS and GCP are supported). + +| OS | Command | +|----|---------| +| macOS / Linux | `./deploy.sh` | +| Windows (PowerShell) | `.\deploy.ps1` | + +You can also fork the repo, set GitHub variables/secrets, and push to **`test`** or **`prod`** to trigger CI — see [DEPLOYMENT.md](docs/DEPLOYMENT.md). + +### Prerequisites + +In order for the deploy script to work, you need **Git** and **Bash** (on Windows, use **Git for Windows** / **Git Bash** or **WSL**). + +**AWS:** AWS CLI v2, SAM CLI, Docker (for `sam build --use-container`), Python 3, and `curl`. Optional: `gh` for GitHub Actions setup. + +**GCP:** Terraform, `gcloud`, Python 3, and `curl`. Optional: `gh`. + +Full prerequisite checks, manual `sam` / Terraform, Slack URLs after deploy, and CI variables: **[docs/DEPLOYMENT.md](docs/DEPLOYMENT.md)**. 
+ + +--- + +## Local development + +See **[docs/DEVELOPMENT.md](docs/DEVELOPMENT.md)** for Dev Container, Docker Compose, native Python, project layout, and refreshing `syncbot/requirements.txt` after dependency changes. + +--- + +## Further reading + +| Doc | Contents | +|-----|----------| +| [USER_GUIDE.md](docs/USER_GUIDE.md) | End-user features (Home tab, syncs, groups) | +| [DEPLOYMENT.md](docs/DEPLOYMENT.md) | Guided + manual AWS/GCP deploy, CI, GitHub | +| [DEVELOPMENT.md](docs/DEVELOPMENT.md) | Local dev, branching for forks, dependencies | +| [INFRA_CONTRACT.md](docs/INFRA_CONTRACT.md) | Environment variables and platform expectations | +| [ARCHITECTURE.md](docs/ARCHITECTURE.md) | Sync flow, AWS reference architecture | +| [BACKUP_AND_MIGRATION.md](docs/BACKUP_AND_MIGRATION.md) | Backup/restore and federation migration | +| [API_REFERENCE.md](docs/API_REFERENCE.md) | HTTP routes and Slack events | +| [CHANGELOG.md](CHANGELOG.md) | Release history | +| [CONTRIBUTING.md](CONTRIBUTING.md) | How to contribute | + +## License + +**AGPL-3.0** — see [LICENSE](LICENSE). diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 0000000..b7e34df --- /dev/null +++ b/alembic.ini @@ -0,0 +1,46 @@ +# Alembic config for SyncBot. Run from repo root: +# alembic -c alembic.ini upgrade head +# The app runs migrations programmatically via db.initialize_database(). + +[alembic] +script_location = syncbot/db/alembic +prepend_sys_path = . 
+version_path_separator = os + +sqlalchemy.url = driver://user:pass@localhost/dbname + +[post_write_hooks] + +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/assets/icon.png b/assets/icon.png new file mode 100644 index 0000000..b98dc3e Binary files /dev/null and b/assets/icon.png differ diff --git a/deploy.ps1 b/deploy.ps1 new file mode 100644 index 0000000..41f8bb2 --- /dev/null +++ b/deploy.ps1 @@ -0,0 +1,235 @@ +#requires -Version 5.1 +<# +.SYNOPSIS + SyncBot root deploy launcher for Windows (PowerShell). + + Verifies a bash environment (Git Bash or WSL), scans infra/*/scripts/deploy.sh, + then runs the selected script in bash — same contract as ./deploy.sh on macOS/Linux. + + Provider-specific prerequisite checks live in infra//scripts/deploy.sh + (sourcing repo-root deploy.sh for shared helpers). There are no deploy.ps1 files under infra/. 
+ +.EXAMPLE + .\deploy.ps1 + .\deploy.ps1 aws + .\deploy.ps1 1 +#> +param( + [Parameter(Position = 0)] + [string] $Selection = "", + [Parameter(ValueFromRemainingArguments = $true)] + [string[]] $ScriptArgs +) + +$ErrorActionPreference = "Stop" + +function Find-GitBash { + $cmd = Get-Command bash -ErrorAction SilentlyContinue + if ($cmd) { return $cmd.Source } + $candidates = @( + "${env:ProgramFiles}\Git\bin\bash.exe", + "${env:ProgramFiles(x86)}\Git\bin\bash.exe", + "${env:LocalAppData}\Programs\Git\bin\bash.exe" + ) + foreach ($p in $candidates) { + if (Test-Path -LiteralPath $p) { return $p } + } + return $null +} + +function Test-WslBashWorks { + if (-not (Get-Command wsl.exe -ErrorAction SilentlyContinue)) { return $false } + try { + $null = & wsl.exe -e bash -c "echo wsl_ok" 2>&1 + return ($LASTEXITCODE -eq 0) + } catch { + return $false + } +} + +function Convert-WindowsPathToWsl { + param([string] $WindowsPath) + $full = (Resolve-Path -LiteralPath $WindowsPath).Path + if ($full -match '^([A-Za-z]):[\\/](.*)$') { + $drive = $Matches[1].ToLowerInvariant() + $tail = $Matches[2] -replace '\\', '/' + return "/mnt/$drive/$tail" + } + throw "Cannot map path to WSL (expected C:\...): $WindowsPath" +} + +function Find-DeployBash { + $gitBash = Find-GitBash + if ($gitBash) { + return [pscustomobject]@{ Kind = 'GitBash'; Executable = $gitBash } + } + if (Test-WslBashWorks) { + return [pscustomobject]@{ Kind = 'Wsl'; Executable = 'wsl.exe' } + } + $bashCmd = Get-Command bash -ErrorAction SilentlyContinue + if ($bashCmd) { + return [pscustomobject]@{ Kind = 'Path'; Executable = $bashCmd.Source } + } + return $null +} + +function Show-WindowsPrereqStatus { + param( + [Parameter(Mandatory = $true)] + [string] $RepoRoot, + [Parameter(Mandatory = $true)] + $BashInfo + ) + Write-Host "" + Write-Host "=== SyncBot Deploy (Windows) ===" + Write-Host "Repository: $RepoRoot" + Write-Host "" + Write-Host "Bash environment:" + switch ($BashInfo.Kind) { + 'GitBash' { + Write-Host " 
Git Bash: $($BashInfo.Executable)" -ForegroundColor Green + if (Test-WslBashWorks) { + Write-Host " WSL: available (not used; Git Bash preferred)" -ForegroundColor DarkGray + } else { + Write-Host " WSL: not found or not ready" -ForegroundColor DarkGray + } + } + 'Wsl' { + Write-Host " Git Bash: not found" -ForegroundColor DarkGray + Write-Host " WSL: bash (will run deploy.sh with Windows paths mapped to /mnt/...)" -ForegroundColor Green + } + 'Path' { + Write-Host " bash: $($BashInfo.Executable)" -ForegroundColor Green + } + } + Write-Host "" +} + +function Invoke-DeploySh { + param( + [Parameter(Mandatory = $true)] + $BashInfo, + [Parameter(Mandatory = $true)] + [string] $ScriptPath, + [string[]] $BashArgs + ) + $extra = if ($null -ne $BashArgs -and $BashArgs.Count -gt 0) { @($BashArgs) } else { @() } + if ($BashInfo.Kind -eq 'Wsl') { + $wslPath = Convert-WindowsPathToWsl -WindowsPath $ScriptPath + & wsl.exe -e bash $wslPath @extra + } else { + & $BashInfo.Executable $ScriptPath @extra + } +} + +function Show-Usage { + @" +Usage: .\deploy.ps1 [selection] [provider-script-args...] + +No args: + Scan infra/*/scripts/deploy.sh, show a numbered menu, and run your choice. + +With [selection]: + - provider name (e.g. aws, gcp), OR + - menu index (e.g. 
1, 2) +"@ +} + +function Get-DeployScripts { + param([string] $RepoRoot) + $infraDir = Join-Path $RepoRoot "infra" + if (-not (Test-Path -LiteralPath $infraDir)) { return @() } + + $providers = Get-ChildItem -LiteralPath $infraDir -Directory -ErrorAction SilentlyContinue | Sort-Object Name + $results = @() + foreach ($provider in $providers) { + $scriptPath = Join-Path $provider.FullName "scripts/deploy.sh" + if (Test-Path -LiteralPath $scriptPath) { + $results += [pscustomobject]@{ + Provider = $provider.Name + Path = $scriptPath + } + } + } + return $results +} + +function Resolve-Selection { + param( + [array] $Entries, + [string] $Selection + ) + + if ($Selection -match '^\d+$') { + $index = [int]$Selection + if ($index -ge 1 -and $index -le $Entries.Count) { + return $Entries[$index - 1] + } + return $null + } + + foreach ($entry in $Entries) { + if ($entry.Provider -ieq $Selection) { + return $entry + } + } + return $null +} + +$RepoRoot = Split-Path -Parent $MyInvocation.MyCommand.Path + +if ($Selection -in @("-h", "--help", "help")) { + Show-Usage + exit 0 +} + +$bashInfo = Find-DeployBash +if (-not $bashInfo) { + Write-Host @" +Error: no bash found. 
Install one of: + + • Git for Windows (Git Bash): https://git-scm.com/download/win + • WSL (Windows Subsystem for Linux): https://learn.microsoft.com/windows/wsl/install + +Then re-run: .\deploy.ps1 +"@ -ForegroundColor Red + exit 1 +} + +Show-WindowsPrereqStatus -RepoRoot $RepoRoot -BashInfo $bashInfo + +$entries = Get-DeployScripts -RepoRoot $RepoRoot +if ($entries.Count -eq 0) { + Write-Error "No deploy scripts found under infra/*/scripts/deploy.sh" + exit 1 +} + +if ([string]::IsNullOrWhiteSpace($Selection)) { + Write-Host "Discovered deploy scripts:" + for ($i = 0; $i -lt $entries.Count; $i++) { + $n = $i + 1 + $relativePath = $entries[$i].Path + if ($relativePath.StartsWith($RepoRoot, [System.StringComparison]::OrdinalIgnoreCase)) { + $relativePath = $relativePath.Substring($RepoRoot.Length).TrimStart('\', '/') + } + Write-Host " $n) $($entries[$i].Provider) ($relativePath)" + } + Write-Host " 0) Exit" + Write-Host "" + $choice = Read-Host "Choose provider [1]" + if ([string]::IsNullOrWhiteSpace($choice)) { $choice = "1" } + if ($choice -eq "0") { exit 0 } + $Selection = $choice +} + +$selected = Resolve-Selection -Entries $entries -Selection $Selection +if (-not $selected) { + Write-Host "Invalid selection: $Selection" -ForegroundColor Red + Write-Host "" + Show-Usage + exit 1 +} + +Write-Host "Running: $($selected.Path)" +Invoke-DeploySh -BashInfo $bashInfo -ScriptPath $selected.Path -BashArgs $ScriptArgs +exit $LASTEXITCODE diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000..34c59ad --- /dev/null +++ b/deploy.sh @@ -0,0 +1,686 @@ +#!/usr/bin/env bash +# SyncBot infra-agnostic deploy launcher. +# Discovers provider scripts at infra//scripts/deploy.sh and runs one. 
+# +# Phases when executed as ./deploy.sh (not when sourced): +# 1) Discover infra/*/scripts/deploy.sh +# 2) Interactive menu or CLI selection (provider name or index) +# 3) Resolve script path and exec the provider deploy script with bash +# +# Prerequisite helpers below are also sourced by infra/*/scripts/deploy.sh: +# source "$REPO_ROOT/deploy.sh" +# Also includes prompt_deploy_tasks_aws / prompt_deploy_tasks_gcp for multi-select deploy steps. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$SCRIPT_DIR" + +# --------------------------------------------------------------------------- +# Prerequisite helpers (shared with infra/aws and infra/gcp deploy scripts). +# macOS: Homebrew one-liners where common. Otherwise: vendor install documentation +# (Darwin / Linux / other uname from uname -s only — no platform-specific logic beyond that). +# Root: ./deploy.sh; alternate entrypoint: deploy.ps1 in repo root (see README). +# --------------------------------------------------------------------------- + +prereqs_hint_aws_cli() { + echo "Install AWS CLI v2:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install awscli" ;; + Linux) echo " https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" ;; + *) echo " https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html" ;; + esac + echo " User guide: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html" +} + +prereqs_hint_sam_cli() { + echo "Install AWS SAM CLI:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install aws-sam-cli" ;; + *) + echo " https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/install-sam-cli.html" + ;; + esac + echo " Developer guide: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/what-is-sam.html" +} + +prereqs_hint_terraform() { + echo "Install Terraform:" + echo " https://developer.hashicorp.com/terraform/install" + 
echo " Introduction: https://developer.hashicorp.com/terraform/docs" +} + +prereqs_hint_gcloud() { + echo "Install Google Cloud SDK:" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install --cask google-cloud-sdk" ;; + *) echo " https://cloud.google.com/sdk/docs/install" ;; + esac + echo " gcloud CLI reference: https://cloud.google.com/sdk/gcloud/reference" +} + +prereqs_hint_gh_cli() { + echo "Install GitHub CLI (gh):" + case "$(uname -s 2>/dev/null)" in + Darwin) echo " brew install gh" ;; + Linux) echo " https://github.com/cli/cli/blob/trunk/docs/install_linux.md" ;; + *) echo " https://cli.github.com/" ;; + esac + echo " Manual: https://cli.github.com/manual/" +} + +prereqs_hint_python3() { + echo "Install Python 3.12+ (the deploy helpers use python3 for manifest/JSON helpers):" + echo " https://www.python.org/downloads/" + echo " Documentation: https://docs.python.org/3/" +} + +prereqs_hint_docker() { + echo "Install Docker (used by sam build --use-container on AWS):" + case "$(uname -s 2>/dev/null)" in + Linux) echo " https://docs.docker.com/engine/install/" ;; + *) echo " https://www.docker.com/products/docker-desktop/" ;; + esac +} + +prereqs_hint_curl() { + echo "Install curl (used for Slack manifest API and downloads):" + echo " https://curl.se/download.html" +} + +prereqs_hint_slack_apps_docs() { + echo "Slack apps (browser) and API tokens (optional manifest automation):" + echo " https://api.slack.com/apps" + echo " https://api.slack.com/authentication/token-types" + echo "Manifest API (apps.manifest.update / create):" + echo " https://api.slack.com/reference/methods/apps.manifest.update" +} + +prereqs_icon_ok() { + printf '\033[0;32m✓\033[0m' +} + +prereqs_icon_optional() { + printf '\033[1;33m!\033[0m' +} + +prereqs_icon_required_missing() { + printf '\033[0;31m✗\033[0m' +} + +prereqs_prompt_continue_without_optional() { + local answer + read -r -p "Do you want to proceed? 
[Y/n]: " answer + if [[ -z "$answer" || "$answer" =~ ^[Yy]$ ]]; then + return 0 + fi + return 1 +} + +prereqs_print_cli_status_matrix() { + local provider="$1" + shift + local name + echo "" >&2 + echo "=== CLI Prerequisites ($provider) ===" >&2 + for name in "$@"; do + if command -v "$name" >/dev/null 2>&1; then + printf ' %s: %s\n' "$name" "$(prereqs_icon_ok)" >&2 + else + printf ' %s: %s\n' "$name" "$(prereqs_icon_required_missing)" >&2 + fi + done + if command -v gh >/dev/null 2>&1; then + printf ' gh: %s\n' "$(prereqs_icon_ok)" >&2 + else + printf ' gh: %s\n' "$(prereqs_icon_optional)" >&2 + echo "" >&2 + echo "The GitHub gh command was not found; install it for automated GitHub repository setup." >&2 + prereqs_hint_gh_cli >&2 + echo "" >&2 + if ! prereqs_prompt_continue_without_optional; then + echo "Exiting. Install gh and rerun, or answer Y to continue without it." >&2 + exit 1 + fi + fi + echo "" >&2 + prereqs_hint_slack_apps_docs >&2 + echo "" >&2 +} + +prereqs_require_cmd() { + local cmd="$1" + local hint_fn="${2:-}" + if ! command -v "$cmd" >/dev/null 2>&1; then + echo "Error: required command '$cmd' not found in PATH." >&2 + if [[ -n "$hint_fn" ]] && declare -F "$hint_fn" >/dev/null 2>&1; then + "$hint_fn" >&2 + fi + exit 1 + fi +} + +# Slack Web API responses can be large; avoid flooding the terminal on errors (AWS/GCP deploy scripts). +slack_api_echo_truncated_body() { + local body="$1" + local max_len="${2:-500}" + if [[ -z "$body" ]]; then + echo "(empty response)" + return 0 + fi + if [[ ${#body} -gt max_len ]]; then + echo "${body:0:max_len}... (truncated, ${#body} chars total)" + else + echo "$body" + fi +} + +# Log level (shared by infra/aws and infra/gcp deploy scripts; matches syncbot/logger.py LOG_LEVEL). 
+is_valid_log_level() { + case "$1" in + DEBUG | INFO | WARNING | ERROR | CRITICAL) return 0 ;; + *) return 1 ;; + esac +} + +normalize_log_level() { + echo "$1" | tr "[:lower:]" "[:upper:]" +} + +# Menu order: DEBUG first (1), then INFO..CRITICAL. Matches Python logging severity order. +log_level_to_menu_index() { + case "$(normalize_log_level "$1")" in + DEBUG) echo 1 ;; + INFO) echo 2 ;; + WARNING) echo 3 ;; + ERROR) echo 4 ;; + CRITICAL) echo 5 ;; + *) echo 2 ;; + esac +} + +menu_index_to_log_level() { + case "$1" in + 1) echo DEBUG ;; + 2) echo INFO ;; + 3) echo WARNING ;; + 4) echo ERROR ;; + 5) echo CRITICAL ;; + *) return 1 ;; + esac +} + +prompt_log_level() { + local default_level="$1" + local default_idx choice i name suf + default_idx="$(log_level_to_menu_index "$default_level")" + + echo >&2 + for i in 1 2 3 4 5; do + name="$(menu_index_to_log_level "$i")" + suf="" + [[ "$i" == "$default_idx" ]] && suf=" (default/current)" + echo " $i) $name$suf" >&2 + done + + while true; do + read -r -p "Choose level [$default_idx]: " choice + [[ -z "$choice" ]] && choice="$default_idx" + case "$choice" in + 1 | 2 | 3 | 4 | 5) + menu_index_to_log_level "$choice" + return 0 + ;; + esac + echo "Invalid choice: $choice. Enter a number from 1 to 5." >&2 + done +} + +# App settings (used by infra/aws and infra/gcp deploy scripts). Hints on stderr; value on stdout. + +prompt_require_admin() { + local default="$1" + echo "Restrict sync configuration to workspace admins and owners only." >&2 + local hint="Y/n" + [[ "$default" == "false" ]] && hint="y/N" + while true; do + local answer + read -r -p "REQUIRE_ADMIN [$hint]: " answer + if [[ -z "$answer" ]]; then + echo "$default" + return 0 + fi + case "$answer" in + [Yy] | yes | YES | true | TRUE) echo "true"; return 0 ;; + [Nn] | no | NO | false | FALSE) echo "false"; return 0 ;; + esac + echo "Enter y or n (current: $default)." 
>&2 + done +} + +prompt_soft_delete_retention_days() { + local default="$1" + echo "Days to keep soft-deleted workspace data before permanent purge." >&2 + while true; do + local v + read -r -p "SOFT_DELETE_RETENTION_DAYS [$default]: " v + v="${v:-$default}" + if [[ "$v" =~ ^[0-9]+$ ]] && [[ "$v" -gt 0 ]]; then + echo "$v" + return 0 + fi + echo "Enter a positive integer." >&2 + done +} + +prompt_primary_workspace() { + local default="$1" + echo "Slack Team ID for PRIMARY_WORKSPACE (required for backup/restore to appear; also scopes DB reset)." >&2 + echo "Leave empty to hide backup/restore from all workspaces." >&2 + local disp + if [[ -z "$default" ]]; then + disp="(any workspace)" + else + disp="$default" + fi + local v + read -r -p "PRIMARY_WORKSPACE (Slack Team ID) [$disp]: " v + v="${v:-$default}" + case "$(echo "$v" | tr "[:upper:]" "[:lower:]")" in + "" | none) echo "" ;; + *) echo "$v" ;; + esac +} + +prompt_federation_enabled() { + local default="$1" + echo "Allow external connections between SyncBot instances (federation)." >&2 + local hint="y/N" + [[ "$default" == "true" ]] && hint="Y/n" + while true; do + local answer + read -r -p "SYNCBOT_FEDERATION_ENABLED [$hint]: " answer + if [[ -z "$answer" ]]; then + echo "$default" + return 0 + fi + case "$answer" in + [Yy] | yes | YES | true | TRUE) echo "true"; return 0 ;; + [Nn] | no | NO | false | FALSE) echo "false"; return 0 ;; + esac + echo "Enter y or n (current: $default)." >&2 + done +} + +prompt_instance_id() { + local default="$1" + echo "Unique UUID for this SyncBot instance (leave empty to auto-generate at runtime)." >&2 + local disp="${default:-(empty)}" + local v + read -r -p "SYNCBOT_INSTANCE_ID [$disp]: " v + echo "${v:-$default}" +} + +prompt_public_url() { + local default="$1" + echo "Public HTTPS base URL for this instance (required for federation)." 
>&2 + local disp="${default:-(empty)}" + local v + read -r -p "SYNCBOT_PUBLIC_URL [$disp]: " v + echo "${v:-$default}" +} + +# Parse owner/repo from a github.com git remote URL (ssh, https, ssh://). Empty if not GitHub. +github_owner_repo_from_url() { + local url="$1" + url="${url%.git}" + url="${url%/}" + if [[ "$url" =~ ^git@github\.com:([^/]+)/(.+)$ ]]; then + echo "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}" + return 0 + fi + if [[ "$url" =~ ^ssh://git@github\.com/([^/]+)/(.+)$ ]]; then + echo "${BASH_REMATCH[1]}/${BASH_REMATCH[2]}" + return 0 + fi + if [[ "$url" =~ ^https://([^/@]+@)?github\.com/([^/]+)/([^/]+)$ ]]; then + echo "${BASH_REMATCH[2]}/${BASH_REMATCH[3]}" + return 0 + fi + return 1 +} + +# Emit owner/repo for GitHub Actions variables. Uses git remotes (origin, upstream, others) so forks +# are not confused with `gh repo view` (which often follows upstream). If there are no github.com +# remotes, falls back to `gh repo view` or a manual prompt. Prints chosen repo to stdout; hints to stderr. +prompt_github_repo_for_actions() { + local git_dir="${1:-$REPO_ROOT}" + local canon tmp url or n gh_inf nlines choice i line or_only lab_only + local _cr_done + _cr_done() { + rm -f "$canon" "$tmp" + } + canon="$(mktemp)" + tmp="$(mktemp)" + + if ! git -C "$git_dir" rev-parse --git-dir >/dev/null 2>&1; then + echo "Not a git checkout; enter GitHub owner/repo manually." >&2 + while true; do + read -r -p "GitHub repository (owner/repo): " choice + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + _cr_done + echo "$choice" + return 0 + fi + echo "Expected owner/repo (e.g. myorg/syncbot)." >&2 + done + fi + + _github_repo_add_unique() { + local o="$1" + local label="$2" + [[ -z "$o" ]] && return + if ! 
grep -Fxq "$o" "$tmp" 2>/dev/null; then + echo "$o" >>"$tmp" + printf '%s\t%s\n' "$o" "$label" >>"$canon" + fi + } + + for n in origin upstream; do + url="$(git -C "$git_dir" remote get-url "$n" 2>/dev/null || true)" + or="$(github_owner_repo_from_url "$url" || true)" + _github_repo_add_unique "$or" "git remote $n" + done + while IFS= read -r n; do + [[ "$n" == "origin" || "$n" == "upstream" ]] && continue + url="$(git -C "$git_dir" remote get-url "$n" 2>/dev/null || true)" + or="$(github_owner_repo_from_url "$url" || true)" + _github_repo_add_unique "$or" "git remote $n" + done < <(git -C "$git_dir" remote 2>/dev/null | LC_ALL=C sort) + + # Do not merge in `gh repo view` when remotes exist: gh often tracks upstream and + # disagrees with the fork (origin) the user wants for Actions variables. + + nlines="$(wc -l <"$canon" | tr -d ' ')" + gh_inf="" + if [[ "$nlines" -eq 0 ]] && command -v gh >/dev/null 2>&1; then + gh_inf="$(gh -C "$git_dir" repo view --json nameWithOwner -q .nameWithOwner 2>/dev/null || true)" + fi + + if [[ "$nlines" -eq 0 ]]; then + if [[ -n "$gh_inf" ]]; then + read -r -p "GitHub repository for Actions variables [$gh_inf] (from gh; no github.com remotes): " choice + choice="${choice:-$gh_inf}" + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Using GitHub repository: $gh_inf" >&2 + _cr_done + echo "$gh_inf" + return 0 + fi + echo "Could not detect owner/repo from remotes. Enter it manually." >&2 + while true; do + read -r -p "GitHub repository (owner/repo): " choice + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + _cr_done + echo "$choice" + return 0 + fi + echo "Expected owner/repo (e.g. myorg/syncbot)." 
>&2 + done + fi + + if [[ "$nlines" -eq 1 ]]; then + IFS=$'\t' read -r or_only lab_only <"$canon" + read -r -p "GitHub repository for Actions variables [$or_only] ($lab_only): " choice + choice="${choice:-$or_only}" + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Invalid owner/repo; using $or_only." >&2 + _cr_done + echo "$or_only" + return 0 + fi + + echo "Multiple GitHub repositories detected (fork vs upstream, etc.). Choose where to set Actions variables and secrets:" >&2 + i=1 + while IFS=$'\t' read -r or lab_only; do + echo " $i) $or ($lab_only)" >&2 + i=$((i + 1)) + done <"$canon" + + while true; do + read -r -p "Enter number [1-$nlines] or owner/repo: " choice + [[ -z "$choice" ]] && choice=1 + if [[ "$choice" =~ ^[0-9]+$ ]]; then + line="$(sed -n "${choice}p" "$canon")" + if [[ -n "$line" ]]; then + IFS=$'\t' read -r or_only lab_only <<<"$line" + echo "Using GitHub repository: $or_only" >&2 + _cr_done + echo "$or_only" + return 0 + fi + fi + if [[ "$choice" =~ ^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+$ ]]; then + echo "Using GitHub repository: $choice" >&2 + _cr_done + echo "$choice" + return 0 + fi + echo "Invalid choice. Enter 1-$nlines or owner/repo." >&2 + done +} + +# --------------------------------------------------------------------------- +# Deploy task selection (used by infra/aws and infra/gcp deploy scripts). +# Sets global variables named in flag_names to "true" or "false". 
+# --------------------------------------------------------------------------- + +_prompt_deploy_tasks_parsechoices() { + local choices_raw="${1:-}" + shift + local -a flag_names=("$@") + local n="${#flag_names[@]}" + local i name def="" part idx + for name in "${flag_names[@]}"; do + eval "${name}=false" + done + for ((i = 1; i <= n; i++)); do + [[ -n "$def" ]] && def+="," + def+="$i" + done + local choices="${choices_raw// /}" + [[ -z "$choices" ]] && choices="$def" + IFS=',' read -r -a parts <<<"$choices" + for part in "${parts[@]}"; do + part="${part// /}" + [[ -z "$part" ]] && continue + if [[ "$part" =~ ^[0-9]+$ ]]; then + idx="$part" + if [[ "$idx" -ge 1 && "$idx" -le "$n" ]]; then + eval "${flag_names[$((idx - 1))]}=true" + else + echo "Invalid task number: $part (must be 1-$n)" >&2 + exit 1 + fi + else + echo "Invalid task selection: $part (use comma-separated numbers)" >&2 + exit 1 + fi + done +} + +prompt_deploy_tasks_aws() { + echo "=== Deploy Tasks ===" + printf ' 1) %s\n' "Bootstrap - Create/sync bootstrap stack" + printf ' 2) %s\n' "Build/Deploy - SAM build + deploy" + printf ' 3) %s\n' "CI/CD - GitHub Actions configuration" + printf ' 4) %s\n' "Slack API - Configure Slack app via API" + printf ' 5) %s\n' "Backup Secrets - Print DR backup secrets" + local default_all="1,2,3,4,5" + local choices="" + read -r -e -p "Select tasks (comma-separated) [$default_all]: " choices + choices="${choices:-$default_all}" + _prompt_deploy_tasks_parsechoices "$choices" TASK_BOOTSTRAP TASK_BUILD_DEPLOY TASK_CICD TASK_SLACK_API TASK_BACKUP_SECRETS +} + +prompt_deploy_tasks_gcp() { + echo "=== Deploy Tasks ===" + printf ' 1) %s\n' "Build/Deploy - Terraform plan + apply" + printf ' 2) %s\n' "CI/CD - GitHub Actions configuration" + printf ' 3) %s\n' "Slack API - Configure Slack app via API" + printf ' 4) %s\n' "Backup Secrets - Print DR backup secrets" + local default_all="1,2,3,4" + local choices="" + read -r -e -p "Select tasks (comma-separated) [$default_all]: " 
choices + choices="${choices:-$default_all}" + _prompt_deploy_tasks_parsechoices "$choices" TASK_BUILD_DEPLOY TASK_CICD TASK_SLACK_API TASK_BACKUP_SECRETS +} + +# When sourced by infra/*/scripts/deploy.sh, only load helpers above. +if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then + return 0 +fi + +# --------------------------------------------------------------------------- +# Launcher (only when this file is executed: ./deploy.sh) +# --------------------------------------------------------------------------- + +usage() { + cat <&2 + echo "Repository: $REPO_ROOT" >&2 + echo >&2 + echo "Discovered deploy scripts:" >&2 + + while IFS=$'\t' read -r provider path; do + [[ -z "$provider" ]] && continue + rel_path="${path#$REPO_ROOT/}" + echo " $idx) $provider ($rel_path)" >&2 + idx=$((idx + 1)) + done <<< "$entries" + echo " 0) Exit" >&2 + echo >&2 + + local choice + read -r -p "Choose provider [1]: " choice >&2 + choice="${choice:-1}" + echo "$choice" +} + +resolve_script_from_selection() { + local entries="$1" + local selection="$2" + local line idx=1 provider path + + # Numeric selection + if [[ "$selection" =~ ^[0-9]+$ ]]; then + while IFS=$'\t' read -r provider path; do + [[ -z "$provider" ]] && continue + if [[ "$idx" -eq "$selection" ]]; then + echo "$path" + return 0 + fi + idx=$((idx + 1)) + done <<< "$entries" + return 1 + fi + + # Provider name selection + while IFS=$'\t' read -r provider path; do + [[ -z "$provider" ]] && continue + if [[ "$provider" == "$selection" ]]; then + echo "$path" + return 0 + fi + done <<< "$entries" + return 1 +} + +main() { + if [[ "${1:-}" == "-h" || "${1:-}" == "--help" || "${1:-}" == "help" ]]; then + usage + exit 0 + fi + + local entries + entries="$(discover_deploy_scripts)" + if [[ -z "$entries" ]]; then + echo "No deploy scripts found under infra/*/scripts/deploy.sh" >&2 + exit 1 + fi + + local selection="${1:-}" + if [[ -z "$selection" ]]; then + selection="$(select_script_interactive "$entries")" + fi + if [[ "$selection" == 
"0" ]]; then + exit 0 + fi + + local script_path + if ! script_path="$(resolve_script_from_selection "$entries" "$selection")"; then + echo "Invalid selection: $selection" >&2 + echo + usage + exit 1 + fi + + echo "=== Sync Python Dependencies ===" + if command -v poetry &>/dev/null; then + poetry update --quiet + if poetry self show plugins 2>/dev/null | grep -q poetry-plugin-export; then + poetry export -f requirements.txt --without-hashes -o "$REPO_ROOT/syncbot/requirements.txt" + echo "syncbot/requirements.txt updated from poetry.lock." + else + echo "Warning: poetry-plugin-export not installed. Run: poetry self add poetry-plugin-export" >&2 + echo "Skipping requirements.txt sync." >&2 + fi + else + echo "Warning: poetry not found. Skipping dependency sync." >&2 + fi + + echo "=== Run Provider Script ===" + echo "Running: $script_path" + bash "$script_path" +} + +main "$@" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..7a2d1b3 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,44 @@ +services: + db: + image: mysql:8 + environment: + MYSQL_ROOT_PASSWORD: rootpass + MYSQL_ROOT_HOST: "%" # allow root from host (e.g. mysql -h 127.0.0.1 -P 3306 -u root -p) + MYSQL_DATABASE: syncbot + ports: + - "3306:3306" + volumes: + - syncbot-db:/var/lib/mysql + healthcheck: + test: ["CMD", "mysqladmin", "ping", "-h", "localhost"] + interval: 5s + timeout: 5s + retries: 10 + + app: + build: . 
+ ports: + - "3000:3000" + depends_on: + db: + condition: service_healthy + environment: + # Database + DATABASE_BACKEND: ${DATABASE_BACKEND:-mysql} + DATABASE_URL: ${DATABASE_URL:-} + DATABASE_HOST: ${DATABASE_HOST:-db} + DATABASE_USER: ${DATABASE_USER:-root} + DATABASE_PASSWORD: ${DATABASE_PASSWORD:-rootpass} + DATABASE_SCHEMA: ${DATABASE_SCHEMA:-syncbot} + DATABASE_TLS_ENABLED: ${DATABASE_TLS_ENABLED:-false} + DATABASE_SSL_CA_PATH: ${DATABASE_SSL_CA_PATH:-/etc/pki/tls/certs/ca-bundle.crt} + # Slack — replace with your values or use a .env file + SLACK_BOT_TOKEN: ${SLACK_BOT_TOKEN:-xoxb-your-bot-token} + # Optional + TOKEN_ENCRYPTION_KEY: ${TOKEN_ENCRYPTION_KEY:-123} + REQUIRE_ADMIN: ${REQUIRE_ADMIN:-true} + volumes: + - ./syncbot:/app/syncbot + +volumes: + syncbot-db: diff --git a/docs/API_REFERENCE.md b/docs/API_REFERENCE.md new file mode 100644 index 0000000..4d94594 --- /dev/null +++ b/docs/API_REFERENCE.md @@ -0,0 +1,30 @@ +# API Reference + +## HTTP Endpoints (API Gateway) + +All endpoints are served by a single Lambda function. Slack sends requests to the `/slack/*` URLs after you configure the app. The `/api/federation/*` endpoints handle cross-instance communication for external connections. 
+ +| Method | Path | Purpose | +|--------|------|---------| +| `POST` | `/slack/events` | Receives all Slack events (messages, actions, view submissions) and slash commands | +| `GET` | `/slack/install` | OAuth install page — redirects the user to Slack's authorization screen | +| `GET` | `/slack/oauth_redirect` | OAuth callback — Slack redirects here after the user approves the app | +| `POST` | `/api/federation/pair` | Accept an incoming external connection request | +| `POST` | `/api/federation/message` | Receive a forwarded message from a connected instance; resolves `@` mentions and `#` channel references locally before posting | +| `POST` | `/api/federation/message/edit` | Receive a message edit from a connected instance; applies the same local mention and channel resolution before updating | +| `POST` | `/api/federation/message/delete` | Receive a message deletion from a connected instance | +| `POST` | `/api/federation/message/react` | Receive a reaction from a connected instance | +| `POST` | `/api/federation/users` | Exchange user directory with a connected instance | +| `GET` | `/api/federation/ping` | Health check for connected instances | + +## Subscribed Slack Events + +| Event | Handler | Description | +|-------|---------|-------------| +| `app_home_opened` | `handle_app_home_opened` | Publishes the Home tab with workspace groups, channel syncs, and user matching. | +| `member_joined_channel` | `handle_member_joined_channel` | Detects when SyncBot is added to an unconfigured channel; posts a message and leaves. | +| `message.channels` / `message.groups` | `respond_to_message_event` | Fires on new messages, edits, deletes, and file shares in public/private channels. Dispatches to sub-handlers for new posts, thread replies, edits, deletes, and reactions. | +| `reaction_added` / `reaction_removed` | `_handle_reaction` | Syncs emoji reactions to the corresponding message in all target channels. 
| +| `team_join` | `handle_team_join` | Fires when a new user joins a connected workspace. Adds the user to the directory and re-checks unmatched user mappings. | +| `tokens_revoked` | `handle_tokens_revoked` | Handles workspace uninstall — soft-deletes workspace data and notifies group members. | +| `user_profile_changed` | `handle_user_profile_changed` | Detects display name or email changes and updates the user directory and mappings. | diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md new file mode 100644 index 0000000..a395926 --- /dev/null +++ b/docs/ARCHITECTURE.md @@ -0,0 +1,147 @@ +# Architecture + +## Module Overview + +SyncBot is organized into five top-level packages inside `syncbot/`: + +| Package | Responsibility | +|---------|----------------| +| `handlers/` | Slack event and action handlers (messages, groups, channel sync, users, tokens, federation UI, backup/restore, data migration) | +| `builders/` | Slack UI construction — Home tab, modals, and forms | +| `helpers/` | Business logic, Slack API wrappers, encryption, file handling, user matching, caching, export/import (backup dump/restore, migration build/import) | +| `federation/` | Cross-instance sync — Ed25519 signing/verification, HTTP client, API endpoint handlers, pair payload (optional team_id/workspace_name for Instance A detection) (opt-in) | +| `db/` | SQLAlchemy engine, session management, `DbManager` CRUD helper, ORM models | +| `slack/` | Block Kit abstractions — action/callback ID constants, form definitions, ORM elements | + +Top-level modules: `app.py` (entry point), `routing.py` (event dispatcher), `constants.py` (env-var names), `logger.py` (structured logging + metrics). 
+ +## Message Sync Flow + +When a user posts a message in a synced channel, SyncBot replicates it to every other channel in the Sync group: + +```mermaid +sequenceDiagram + participant U as User (Workspace A) + participant S as Slack API + participant AG as API Gateway + participant L as Lambda (SyncBot) + participant DB as RDS + participant SB as Slack API (Workspace B) + + U->>S: Posts message in #general + S->>AG: POST /slack/events + AG->>L: Proxy event + L->>L: Assign correlation ID + L->>L: Acknowledge (ack) + L->>DB: Look up sync group for channel + DB-->>L: SyncChannel + Workspace records + + alt Message has images or files (streamed with size cap) + L->>S: Download file via URL + S-->>L: File bytes (streaming) + L->>SB: files_upload_v2 (direct upload to each target channel) + end + + L->>S: users.info (resolve sender) + S-->>L: display_name, avatar URL + + loop For each target channel + L->>L: Re-map @mentions (cached user matching) + L->>L: Resolve #channel refs (native if synced, else archive URL) + L->>SB: chat.postMessage (as sender) + SB-->>L: ts (timestamp) + L->>DB: Save PostMeta record + end + + L->>L: Emit metrics (messages_synced) + L-->>AG: 200 OK + AG-->>S: 200 OK +``` + +The same pattern applies to edits (`chat.update`), deletes (`chat.delete`), thread replies (with `thread_ts`), and reactions (threaded reply with emoji attribution). + +For **federation**, the receiving instance resolves `@` mentions and `#` channel references locally before `chat.postMessage` / `chat.update`: mapped users become native `<@U>` tags, channels that are part of the same sync become native `<#C>` tags, and other channels keep the archive links sent by the origin instance. + +## AWS Infrastructure + +How to deploy or update this stack (guided script, `sam`, GitHub Actions) is documented in **[DEPLOYMENT.md](DEPLOYMENT.md)**. The diagram below reflects the **reference** SAM template (`infra/aws/template.yaml`). 
+ +```mermaid +flowchart TB + subgraph Slack["Slack Platform"] + WA["Workspace A"] + WB["Workspace B"] + end + + subgraph AWS["AWS Account"] + subgraph APIGW["API Gateway"] + EP["/slack/events<br/>/slack/install<br/>/slack/oauth_redirect<br/>/api/federation/*"] + end + + subgraph Lambda["Lambda Function"] + APP["app.py → routing.py"] + HAND["handlers/"] + BUILD["builders/"] + HELP["helpers/"] + FED["federation/"] + end + + subgraph Database["RDS PostgreSQL or MySQL"] + T1["workspaces"] + T2["workspace_groups"] + T2a["workspace_group_members"] + T3["syncs"] + T4["sync_channels"] + T5["post_meta"] + T6["user_directory"] + T7["user_mappings"] + T8["federated_workspaces"] + end + + subgraph Monitoring["CloudWatch"] + CW["Alarms:<br/>Lambda Errors<br/>Throttles<br/>Duration<br/>API 5xx"] + LG["Logs:<br/>Structured JSON<br/>Correlation IDs<br/>Metrics"] + end + + EB["EventBridge<br/>(keep-warm every 5 min)"] + end + + WA & WB <-->|Events & API calls| EP + EP --> APP + APP --> HAND + HAND --> HELP + HAND --> BUILD + HELP --> FED + HELP -->|SQLAlchemy<br/>QueuePool + retry| Database + EB -->|ScheduleV2| Lambda + Lambda -.->|logs & metrics| Monitoring +``` + +All infrastructure is defined in `infra/aws/template.yaml` (AWS SAM). Dashed lines indicate resources that are conditionally created — when `Existing*` parameters are set, those resources are skipped. + +## Security & Hardening + +| Layer | Protection | +|-------|------------| +| **Input** | File count caps (20), mention caps (50), federation user caps (5,000), federation body size limit (1 MB), `_sanitize_text` on form input | +| **Downloads** | Streaming with 30s timeout, 100 MB size cap, 8 KB chunks — prevents unbounded memory/disk usage | +| **Encryption** | Bot tokens encrypted at rest with Fernet (PBKDF2-derived key, cached to avoid repeated 600K iterations) | +| **Database** | `pool_pre_ping=True` for stale connection detection, retry decorator on all operations, `dispose()` only after all retries exhausted | +| **Slack API** | `slack_retry` decorator with exponential backoff, `Retry-After` header support, user profile caching | +| **Network** | RDS SSL/TLS enforcement, API Gateway throttling (20 burst / 10 sustained), federation HMAC-SHA256 signing with 5-minute replay window | +| **Authorization** | Admin/owner checks on all configuration actions, configurable via `REQUIRE_ADMIN` | + +## Performance & Cost (Home and User Mapping Refresh) + +To keep RDS and Slack API usage low when admins use the **Refresh** button on the Home tab or User Mapping screen: + +- **Content hash** — A minimal set of DB queries computes a hash of the data that drives the view (groups, members, syncs, pending invites; for User Mapping, mapping ids and methods). If the hash matches the last full refresh, the app skips expensive work. +- **Cached built blocks** — After a full refresh, the built Block Kit payload is cached (keyed by workspace and user).
When the hash matches, the app re-publishes that cached view with one `views.publish` instead of re-running all DB and Slack calls. +- **60-second cooldown** — If the user clicks Refresh again within 60 seconds and the hash is unchanged, the app re-publishes the cached view with a message: "No new data. Wait __ seconds before refreshing again." (seconds remaining from the last refresh). This avoids redundant full refreshes from repeated clicks. +- **Request-scoped caching** — Within a single Lambda invocation, `get_workspace_by_id` and `get_admin_ids` use the request `context` as a cache so repeated lookups for the same workspace or admin list do not hit the DB or Slack again. The same context is passed through all "push refresh" paths (e.g. when one workspace publishes a channel and other workspaces' Home tabs are updated), so those updates share the cache and stay lightweight. + +## Backup, Restore, and Data Migration + +- **Full-instance backup** — All tables are dumped as plain JSON (no compression). The payload includes `version`, `exported_at`, `encryption_key_hash` (SHA-256 of `TOKEN_ENCRYPTION_KEY`), and `hmac` (HMAC-SHA256 over canonical JSON). Restore inserts rows in FK order; it is intended for an empty or fresh database (e.g. after an AWS rebuild). On HMAC or encryption-key mismatch, the UI warns but allows proceeding. After restore, Home tab caches (`home_tab_hash`, `home_tab_blocks`) are invalidated for all restored workspaces. +- **Data migration (workspace-scoped)** — Export produces a JSON file with syncs, sync channels, post meta, user directory, and user mappings keyed by stable identifiers (team_id, sync title, channel_id). The export can include `source_instance` (webhook_url, instance_id, public_key, one-time connection code) so import on the new instance can establish the federation connection and then import in one step. 
The payload is signed with the instance Ed25519 key; import verifies the signature and warns (but does not block) on mismatch. Import uses replace mode: existing SyncChannels and PostMeta for that workspace in the federated group are removed, then data from the file is created. User mappings are imported where both source and target workspace exist on the new instance. After import, Home tab caches for that workspace are invalidated. +- **Instance A detection** — When instance B connects to A via federation, B can send optional `team_id` and `workspace_name` in the pair request. A stores them on the `federated_workspaces` row (`primary_team_id`, `primary_workspace_name`) and, if a local workspace with that `team_id` exists, soft-deletes it so the only representation of that workspace on A is the federated connection. diff --git a/docs/BACKUP_AND_MIGRATION.md b/docs/BACKUP_AND_MIGRATION.md new file mode 100644 index 0000000..ce9de9b --- /dev/null +++ b/docs/BACKUP_AND_MIGRATION.md @@ -0,0 +1,29 @@ +# Backup, Restore, and Data Migration + +## Full-Instance Backup and Restore + +**`PRIMARY_WORKSPACE`** must be set to a Slack Team ID for backup/restore to be available. When set, the **Backup/Restore** button is only shown in that workspace. When unset, backup/restore is hidden everywhere. + +Use **Backup/Restore** (Home tab, next to Refresh) to: + +- **Download backup** — Generates a JSON file containing all tables (workspaces, groups, syncs, channels, post meta, user directory, user mappings, federation, instance keys). The file is sent to your DM. Backup includes an HMAC for integrity and a hash of the encryption key. **Use the same `TOKEN_ENCRYPTION_KEY` on the target instance** so restored bot tokens decrypt; otherwise workspaces must reinstall the app to re-authorize. +- **Restore from backup** — Paste the backup JSON in the modal and submit. Restore is intended for an **empty or fresh database** (e.g. after an AWS rebuild). 
If the encryption key hash or HMAC does not match, you will see a warning and can still proceed (e.g. if you edited the file on purpose). + +After restore, Home tab caches are cleared so the next Refresh shows current data. + +## Reset Database + +Setting **`ENABLE_DB_RESET=true`** (with **`PRIMARY_WORKSPACE`** matching the current workspace) shows a **Reset Database** button on the Home tab. This is an advanced/destructive feature -- it drops and reinitializes the entire database. The deploy scripts do not prompt for it; set it manually via your provider's env/secret configuration (e.g. AWS CloudFormation `EnableDbReset` parameter, GCP Terraform `enable_db_reset` variable, or GitHub Actions `ENABLE_DB_RESET` variable). + +## Workspace Data Migration (Federation) + +When **External Connections** is enabled, **Data Migration** (in that section) lets you: + +- **Export** — Download a workspace-scoped JSON file (syncs, sync channels, post meta, user directory, user mappings) plus an optional one-time connection code so the new instance can connect to the source in one step. The file is signed (Ed25519) for tampering detection. +- **Import** — Paste a migration file, then submit. If the file includes a connection payload and you are not yet connected, the app establishes the federation connection and creates the group, then imports. Existing sync channels for that workspace in the federated group are **replaced** (replace mode). User mappings are imported where both workspaces exist on the new instance. If the signature check fails, a warning is shown but you can still proceed. + +After import, Home tab and sync-list caches for that workspace are cleared. + +### Instance A Behavior + +When a workspace that used to be on Instance A connects to A from a new instance (B) via federation and sends its `team_id`, A soft-deletes the matching local workspace row so only the federated connection represents that workspace. See [ARCHITECTURE.md](ARCHITECTURE.md) for details. 
diff --git a/docs/DEPLOYMENT.md b/docs/DEPLOYMENT.md new file mode 100644 index 0000000..cf02d2e --- /dev/null +++ b/docs/DEPLOYMENT.md @@ -0,0 +1,290 @@ +# Deployment Guide + +This guide explains **what the guided deploy scripts do**, how to perform the **same steps manually** on **AWS** or **GCP**, and how **GitHub Actions** fits in. For the runtime environment variables the app expects in any cloud, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). + +**Runtime baseline:** Python 3.12 — keep `pyproject.toml`, `syncbot/requirements.txt`, Lambda/Cloud Run runtimes, and CI aligned. + +--- + +## Quick start: root launcher + +From the **repository root**: + +| OS | Command | +|----|---------| +| macOS / Linux | `./deploy.sh` | +| Windows (PowerShell) | `.\deploy.ps1` | + +The launcher discovers `infra/<provider>/scripts/deploy.sh`, shows a numbered menu, and runs the script you pick. + +**Non-interactive:** `./deploy.sh aws`, `./deploy.sh gcp`, `./deploy.sh 1` (same for `deploy.ps1`). + +**Windows:** `deploy.ps1` requires **Git Bash** or **WSL** with bash, then runs the same `infra/.../deploy.sh` as macOS/Linux. Alternatively install [Git for Windows](https://git-scm.com/download/win) or [WSL](https://learn.microsoft.com/windows/wsl/install) and run `./deploy.sh` from Git Bash or a WSL shell. + +**Prerequisites** (short list in the root [README](../README.md); full detail below): + +- **AWS path:** AWS CLI v2, SAM CLI, Docker (`sam build --use-container`), Python 3 (`python3`), **`curl`** (Slack manifest API). **Optional:** `gh` (GitHub Actions setup). The script prints a CLI status line per tool (✓ / !) and Slack doc links; if `gh` is missing, it asks whether to continue. +- **GCP path:** Terraform, `gcloud`, Python 3, **`curl`**. **Optional:** `gh` — same behavior as AWS. + +**Slack install error `invalid_scope` / “Invalid permissions requested”:** The OAuth authorize URL is built from **`SLACK_BOT_SCOPES`** and **`SLACK_USER_SCOPES`** in your deployed app (Lambda / Cloud Run). 
They must **exactly match** the scopes on your Slack app (`slack-manifest.json` → **OAuth & Permissions** after manifest update) and `BOT_SCOPES` / `USER_SCOPES` in `syncbot/slack_manifest_scopes.py`. SAM and GCP Terraform defaults include both bot and user scope strings; if your environment has **stale** overrides, redeploy with parameters matching the manifest or update the Slack app to match. On GCP, `slack_user_scopes` must stay aligned with `oauth_config.scopes.user`. **Renames (older stacks):** `SLACK_SCOPES` → `SLACK_BOT_SCOPES`; SAM `SlackOauthScopes` → `SlackOauthBotScopes`; SAM `SlackUserOauthScopes` → `SlackOauthUserScopes` (`SLACK_USER_SCOPES` unchanged). + +--- + +## What the deploy scripts do + +### Root: `deploy.sh` / `deploy.ps1` + +- Scans `infra/*/scripts/deploy.sh` and lists providers (e.g. **aws**, **gcp**). +- Runs the selected provider script in Bash. +- **`./deploy.sh` (macOS / Linux):** Invokes `bash` with the chosen `infra/<provider>/scripts/deploy.sh`. +- **`.\deploy.ps1` (Windows):** Verifies **Git Bash** or **WSL** bash is available (shows which one will be used), then runs the same `deploy.sh` path. There are **no** `deploy.ps1` files under `infra/` — only the repo-root launcher uses PowerShell. Provider prerequisite checks (AWS/GCP tools, optional `gh`, Slack links) run **inside** the bash `deploy.sh` scripts. + +### AWS: `infra/aws/scripts/deploy.sh` + +Runs from repo root (or via `./deploy.sh` → **aws**). It: + +1. **Prerequisites** — Verifies `aws`, `sam`, `docker`, `python3`, `curl` are on `PATH` (with install hints). Prints a status matrix; if optional `gh` is missing, shows install hints and asks whether to continue. Prints Slack app / API token / manifest API links. +2. **AWS auth** — Checks credentials; suggests `aws login`, SSO, or `aws configure` as appropriate. +3. **Bootstrap probe** — Reads bootstrap stack outputs if the stack exists (for suggested stack names and later CI/CD). 
Full **bootstrap** create/sync runs only if you select it in **Deploy Tasks** (see below). +4. **App stack identity** — Prompts for stage (`test`/`prod`) and stack name; detects an existing CloudFormation stack for update. +5. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Bootstrap** (create/sync bootstrap stack; respects `SYNCBOT_SKIP_BOOTSTRAP_SYNC=1` for sync), **Build/Deploy** (full config + SAM), **CI/CD** (`gh` / GitHub Actions), **Slack API**, **Backup Secrets** (DR plaintext echo). Omitting **Build/Deploy** requires an existing stack for tasks that need live outputs. +6. **Configuration** (if Build/Deploy selected) — **Database source** (stack-managed RDS vs existing RDS host) and **engine** (MySQL vs PostgreSQL). **Slack app credentials** (signing secret, client secret, client ID). **Existing database host** mode: RDS endpoint, admin user/password, **public vs private** network mode, and for **private** mode: subnet IDs and Lambda security group (with optional auto-detect and **connectivity preflight**). **New RDS in stack** mode: summarizes auto-generated DB users and prompts for **DatabaseSchema**. Optional **token encryption** recovery override, **log level** (numbered list `1`–`5` with `Choose level [N]:`, default from prior stack or **INFO**), **deploy summary**, then **SAM build** (`--use-container`) and **sam deploy**. +7. **Post-deploy** — According to selected tasks: stack outputs, `slack-manifest_<stage>.json`, Slack API, **`gh`** setup, deploy receipt under `deploy-receipts/` (gitignored), and DR backup lines. + +### GCP: `infra/gcp/scripts/deploy.sh` + +Runs from repo root (or `./deploy.sh` → **gcp**). It: + +1. Verifies **Terraform**, **gcloud**, **python3**, **curl**; optional **gh** handling (same as AWS). +2. Guides **auth** (`gcloud auth login` plus `gcloud auth application-default login`; quota project as needed). +3. 
**Project / stage / existing service** — Prompts for project, region, stage; can detect existing Cloud Run for defaults. +4. **Deploy Tasks** — Multi-select menu (comma-separated, default all): **Build/Deploy** (full Terraform flow), **CI/CD**, **Slack API**, **Backup Secrets**. Skipping **Build/Deploy** requires existing Terraform state/outputs for tasks that need them. +5. **Terraform** (if Build/Deploy selected) — Prompts for DB mode, `cloud_run_image` (required), log level, etc.; `terraform init` / `plan` / `apply` in `infra/gcp` (no separate y/n gates on plan/apply). +6. **Post-deploy** — According to selected tasks: manifest, Slack API, deploy receipt, **`gh`**, `print-bootstrap-outputs.sh`, DR backup lines. + +See [infra/gcp/README.md](../infra/gcp/README.md) for Terraform variables and outputs. + +--- + +## Fork-First model (recommended for forks) + +**Branch roles** (see [CONTRIBUTING.md](../CONTRIBUTING.md)): use **`main`** to track upstream and merge contributions; on your fork, use **`test`** and **`prod`** for automated deploys (CI runs on push to those branches). + +1. Keep `syncbot/` provider-neutral; use only env vars from [INFRA_CONTRACT.md](INFRA_CONTRACT.md). +2. Put provider code in `infra//` and `.github/workflows/deploy-.yml`. +3. Prefer the AWS layout as reference; treat other providers as swappable scaffolds. + +--- + +## Provider selection (CI) + +| Provider | Infra | CI workflow | Default | +|----------|-------|-------------|---------| +| **AWS** | `infra/aws/` | `.github/workflows/deploy-aws.yml` | Yes | +| **GCP** | `infra/gcp/` | `.github/workflows/deploy-gcp.yml` | Opt-in | + +- **AWS only:** Do not set `DEPLOY_TARGET=gcp` (or set it to something other than `gcp`). +- **GCP only:** Set repository variable **`DEPLOY_TARGET`** = **`gcp`**, complete GCP bootstrap + WIF, and disable or skip the AWS workflow so only `deploy-gcp.yml` runs. + +--- + +## Database backends + +The app supports **MySQL** (default), **PostgreSQL**, and **SQLite**. 
Schema changes are applied at startup via Alembic (`alembic upgrade head`). + +- **AWS:** Choose engine in the deploy script or pass `DatabaseEngine=mysql` / `postgresql` to `sam deploy`. +- **Contract:** [INFRA_CONTRACT.md](INFRA_CONTRACT.md) — `DATABASE_BACKEND`, `DATABASE_URL` or host/user/password/schema. + +--- + +## AWS — manual steps (no helper script) + +Use this when you already know SAM/CloudFormation or are debugging. + +### 1. One-time bootstrap + +**Prerequisites:** AWS CLI, SAM CLI (for later app deploy). + +```bash +aws cloudformation deploy \ + --template-file infra/aws/template.bootstrap.yaml \ + --stack-name syncbot-bootstrap \ + --parameter-overrides \ + GitHubRepository=YOUR_GITHUB_OWNER/YOUR_REPO \ + --capabilities CAPABILITY_NAMED_IAM \ + --region us-east-2 +``` + +Optional: `CreateOIDCProvider=false` if the GitHub OIDC provider already exists. + +**Outputs:** + +```bash +./infra/aws/scripts/print-bootstrap-outputs.sh +``` + +Map **GitHubDeployRoleArn** → `AWS_ROLE_TO_ASSUME`, **DeploymentBucketName** → `AWS_S3_BUCKET`, **BootstrapRegion** → `AWS_REGION`. + +### 2. Build and deploy the app stack + +```bash +sam build -t infra/aws/template.yaml --use-container +sam deploy \ + -t .aws-sam/build/template.yaml \ + --stack-name syncbot-test \ + --s3-bucket YOUR_DEPLOYMENT_BUCKET_NAME \ + --capabilities CAPABILITY_IAM \ + --region us-east-2 \ + --parameter-overrides \ + Stage=test \ + SlackSigningSecret=... \ + SlackClientID=... \ + SlackClientSecret=... \ + SlackOauthBotScopes=... \ + SlackOauthUserScopes=... \ + DatabaseEngine=mysql \ + ... +``` + +Use **`sam deploy --guided`** the first time if you prefer prompts. For **existing RDS**, set `ExistingDatabaseHost`, `ExistingDatabaseAdminUser`, `ExistingDatabaseAdminPassword`, and for **private** DBs also `ExistingDatabaseNetworkMode=private`, `ExistingDatabaseSubnetIdsCsv`, `ExistingDatabaseLambdaSecurityGroupId`. Omit `ExistingDatabaseHost` to create a **new** RDS in the stack. 
+ +**samconfig:** Predefined profiles in `samconfig.toml` (`test-new-rds`, `test-existing-rds`, etc.) — adjust placeholders before use. + +**Token key:** The stack can auto-generate `TOKEN_ENCRYPTION_KEY` in Secrets Manager. Back it up after first deploy. Optional: `TokenEncryptionKeyOverride`, `ExistingTokenEncryptionKeySecretArn` for recovery. + +### 3. GitHub Actions (AWS) + +Workflow: `.github/workflows/deploy-aws.yml` (runs on push to `test`/`prod` when not using GCP). + +Configure **repository** variables: `AWS_ROLE_TO_ASSUME`, `AWS_S3_BUCKET`, `AWS_REGION`. + +`AWS_S3_BUCKET` is the bootstrap **SAM deploy artifact** bucket (`DeploymentBucketName`): CI uses it for `sam deploy --s3-bucket` (Lambda package uploads) only. It is **not** for Slack file hosting or other app media. The guided deploy script resolves the target repo from **git remotes** (origin, upstream, then others): if your fork and upstream differ, it asks which `owner/repo` should receive variables, then passes `-R owner/repo` to `gh` so writes go there (not whatever `gh` infers from context alone). + +Configure **per-environment** (`test` / `prod`) variables and secrets so they match your stack — especially if you use **existing RDS** or **private** networking: + +| Type | Name | Notes | +|------|------|--------| +| Var | `AWS_STACK_NAME` | CloudFormation stack name | +| Var | `STAGE_NAME` | `test` or `prod` | +| Var | `DATABASE_SCHEMA` | e.g. `syncbot_test` | +| Var | `LOG_LEVEL` | Optional. `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL`. Passed to SAM as `LogLevel`; defaults to `INFO` in the workflow when unset. 
| +| Var | `SLACK_CLIENT_ID` | From Slack app | +| Var | `DATABASE_ENGINE` | `mysql` or `postgresql` (workflow defaults to `mysql` if unset) | +| Var | `EXISTING_DATABASE_HOST` | Empty for **new** RDS in stack | +| Var | `EXISTING_DATABASE_ADMIN_USER` | When using existing host | +| Var | `EXISTING_DATABASE_NETWORK_MODE` | `public` or `private` | +| Var | `EXISTING_DATABASE_SUBNET_IDS_CSV` | **Private** mode: comma-separated subnet IDs (no spaces) | +| Var | `EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID` | **Private** mode: Lambda ENI security group | +| Secret | `SLACK_SIGNING_SECRET`, `SLACK_CLIENT_SECRET` | | +| Secret | `EXISTING_DATABASE_ADMIN_PASSWORD` | When `EXISTING_DATABASE_HOST` is set | +| Secret | `TOKEN_ENCRYPTION_KEY_OVERRIDE` | Optional DR only | + +The interactive deploy script can set these via `gh` when you opt in. Re-run that step after changing DB mode or engine so CI stays aligned. + +**Dependency hygiene:** The workflow runs `pip-audit` on `syncbot/requirements.txt` and `infra/aws/db_setup/requirements.txt`. After changing `pyproject.toml`: + +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +``` + +### 4. Ongoing local deploys (least privilege) + +Assume the bootstrap **GitHubDeployRole** (or equivalent) and run `sam build` / `sam deploy` as in step 2. + +--- + +## GCP — manual steps + +### 1. Terraform bootstrap + +From `infra/gcp` (or repo root with paths adjusted): + +```bash +terraform init +terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" +terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" +``` + +Set Secret Manager values for Slack/DB as in [infra/gcp/README.md](../infra/gcp/README.md). Set **`cloud_run_image`** after building and pushing the container. Capture outputs: service URL, region, project, Artifact Registry, deploy service account. 
+ +```bash +./infra/gcp/scripts/print-bootstrap-outputs.sh +``` + +**DR:** Optional `token_encryption_key_override` if you must preserve existing encrypted tokens. + +### 2. GitHub Actions (GCP) + +1. Configure [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub → deploy service account. +2. Set **`DEPLOY_TARGET=gcp`** at repo level so `deploy-gcp.yml` runs and `deploy-aws.yml` is skipped. +3. Set variables: `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_WORKLOAD_IDENTITY_PROVIDER`, `GCP_SERVICE_ACCOUNT`, etc. + + The interactive `infra/gcp/scripts/deploy.sh` uses the same GitHub `owner/repo` selection as the AWS script (based on git remotes when fork and upstream differ). + +**Note:** `.github/workflows/deploy-gcp.yml` is intentionally configured to fail until real CI steps are implemented (WIF auth, image build/push, deploy). Keep using `infra/gcp/scripts/deploy.sh` for interactive deploys until CI is fully wired. + +### 3. Ongoing deploys + +Build and push an image to Artifact Registry, then `gcloud run deploy` or `terraform apply` with updated `cloud_run_image`. + +--- + +## Using an existing RDS host (AWS) + +When **ExistingDatabaseHost** is set, the template **does not** create VPC/RDS; a custom resource creates the schema and `syncbot_user_<stage>` with a generated app password in Secrets Manager. + +- **Public:** Lambda is not in your VPC; RDS must be reachable on the Internet on port **3306** or **5432**. +- **Private:** Lambda uses `ExistingDatabaseSubnetIdsCsv` and `ExistingDatabaseLambdaSecurityGroupId`; DB security group must allow the Lambda SG; subnets need **NAT** egress for Slack API calls. + +See also [Sharing infrastructure across apps](#sharing-infrastructure-across-apps-aws) below. + +--- + +## Swapping providers + +1. Keep [INFRA_CONTRACT.md](INFRA_CONTRACT.md) satisfied. +2. Disable the old provider’s workflow; set `DEPLOY_TARGET` if using GCP. +3. 
Bootstrap the new provider; reconfigure GitHub and Slack URLs. + +--- + +## Helper scripts + +| Script | Purpose | +|--------|---------| +| `infra/aws/scripts/print-bootstrap-outputs.sh` | Bootstrap stack outputs → suggested GitHub vars | +| `infra/aws/scripts/deploy.sh` | Interactive AWS deploy (see [What the deploy scripts do](#what-the-deploy-scripts-do)) | +| `infra/gcp/scripts/print-bootstrap-outputs.sh` | Terraform outputs → suggested GitHub vars | +| `infra/gcp/scripts/deploy.sh` | Interactive GCP deploy | + +--- + +## Security summary + +- **Bootstrap** runs once with elevated credentials; creates deploy identity + artifact storage. +- **GitHub:** Short-lived **AWS OIDC** or **GCP WIF** — no long-lived cloud API keys in repos for deploy. +- **Prod:** Use GitHub environment protection rules as needed. + +--- + +## Database schema (Alembic) + +Schema lives under `syncbot/db/alembic/`. On startup the app runs **`alembic upgrade head`**. + +--- + +## Post-deploy: Slack deferred modal flows (manual smoke test) + +After deploying a build that changes Slack listener wiring, verify **in the deployed workspace** (not only local dev) that modals using custom interaction responses still work. These flows rely on `view_submission` acks (`response_action`: `update`, `errors`, or `push`) being returned in the **first** Lambda response: + +1. **Sync Channel (publish)** — Open **Sync Channel**, choose sync mode, press **Next**; confirm step 2 (channel picker) appears. Submit with an invalid state to confirm field errors if applicable. +2. **Backup / Restore** — Open Backup/Restore; try restore validation (e.g. missing file) and, if possible, the integrity-warning confirmation path (`push`). +3. **Data migration** (if federation enabled) — Same style of checks for import validation and confirmation. +4. **Optional** — Trigger a Home tab action that opens a modal via **`views_open`** (uses `trigger_id`) after a cold start to spot-check latency. 
+ +--- + +## Sharing infrastructure across apps (AWS) + +Reuse one RDS with **different `DatabaseSchema`** per app/environment; set **ExistingDatabaseHost** and distinct schemas. API Gateway and Lambda remain per stack. diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md new file mode 100644 index 0000000..b2affa4 --- /dev/null +++ b/docs/DEVELOPMENT.md @@ -0,0 +1,74 @@ +# Development Guide + +How to run SyncBot locally (Dev Container, Docker Compose, native Python) and manage dependencies. For **cloud deploy** and CI/CD, see [DEPLOYMENT.md](DEPLOYMENT.md). For runtime env vars in any environment, see [INFRA_CONTRACT.md](INFRA_CONTRACT.md). + +## Branching (upstream vs downstream) + +The **upstream** repository ([F3Nation-Community/syncbot](https://github.com/F3Nation-Community/syncbot)) is the shared codebase. Each deployment maintains its own **fork**: + +| Branch | Role | +|--------|------| +| **`main`** | Tracks upstream. Use it to merge PRs and to **sync with the upstream repository** (`git pull upstream main`, etc.). | +| **`test`** / **`prod`** | On your fork, use these for **deployments**: GitHub Actions deploy workflows run on **push** to `test` and `prod` (see [DEPLOYMENT.md](DEPLOYMENT.md)). | + +Typical flow: develop on a feature branch → open a PR to **`main`** → merge → when ready to deploy, merge **`main`** into **`test`** or **`prod`** on your fork. + +## Local development + +### Dev Container (recommended) + +**Needs:** [Docker Desktop](https://www.docker.com/products/docker-desktop/) (or Docker Engine on Linux) + [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) in VS Code. + +1. `cp .env.example .env` and set `SLACK_BOT_TOKEN` (`xoxb-...`). +2. **Dev Containers: Reopen in Container** — Python, MySQL, and deps run inside the container. +3. `cd syncbot && python app.py` → app on **port 3000** (forwarded). +4. 
Expose to Slack with **cloudflared** or **ngrok** from the host; set Slack **Event Subscriptions** / **Interactivity** URLs to the public URL. + +Optional **SQLite**: in `.env` set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:////app/syncbot/syncbot.db`. + +### Docker Compose (no Dev Container) + +```bash +cp .env.example .env # set SLACK_BOT_TOKEN +docker compose up --build +``` + +App on port **3000**; restart the `app` service after code changes. + +### Native Python + +**Needs:** Python 3.12+, Poetry. Run MySQL locally (e.g. `docker run ... mysql:8`) or SQLite. See [`.env.example`](../.env.example) and [INFRA_CONTRACT.md](INFRA_CONTRACT.md). + +## Configuration reference + +- **[`.env.example`](../.env.example)** — local env vars with comments. +- **[INFRA_CONTRACT.md](INFRA_CONTRACT.md)** — runtime contract for any cloud (DB, Slack, OAuth, production vs local). + +## Project layout + +``` +syncbot/ +├── syncbot/ # App (app.py); slack_manifest_scopes.py = bot/user OAuth scope lists (manifest + SLACK_BOT_SCOPES / SLACK_USER_SCOPES) +├── syncbot/db/alembic/ # Migrations (bundled with app for Lambda) +├── tests/ +├── docs/ +├── infra/aws/ # SAM, bootstrap stack +├── infra/gcp/ # Terraform +├── deploy.sh # Root launcher (macOS / Linux / Git Bash) +├── deploy.ps1 # Windows launcher → Git Bash or WSL → infra/.../deploy.sh +├── slack-manifest.json +└── docker-compose.yml +``` + +## Dependency management + +After `poetry add` / `poetry update`, regenerate the pinned file used by the Docker image and **`pip-audit`** in CI so it matches `poetry.lock`: + +```bash +poetry self add poetry-plugin-export # Poetry 2.x; once per Poetry install +poetry export -f requirements.txt --without-hashes -o syncbot/requirements.txt +``` + +The root **`./deploy.sh`** may run `poetry update` and regenerate `syncbot/requirements.txt` when Poetry is on your `PATH` (see [DEPLOYMENT.md](DEPLOYMENT.md)). 
This document defines what any infrastructure provider (AWS, GCP, Azure, etc.) must supply so SyncBot runs correctly. Forks can swap provider-specific IaC in `infra/<provider>/` as long as they satisfy this contract.
+- Keep runtime/tooling aligned across: + - Lambda/Cloud Run runtime configuration + - CI Python version + - `pyproject.toml` Python constraint + - `syncbot/requirements.txt` deployment pins +- When dependency constraints change in `pyproject.toml`, refresh both lock and deployment requirements: + +```bash +poetry lock +poetry export --only main --format requirements.txt --without-hashes --output syncbot/requirements.txt +``` + +### Database (backend-agnostic) + +| Variable | Description | +|----------|-------------| +| `DATABASE_BACKEND` | `mysql` (default), `postgresql`, or `sqlite`. | +| `DATABASE_URL` | Full SQLAlchemy URL. When set, overrides host/user/password/schema. **Required for SQLite** (e.g. `sqlite:///path/to/syncbot.db`). For `mysql` / `postgresql`, optional if unset (legacy vars below are used). | +| `DATABASE_HOST` | Database hostname (IP or FQDN). Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_PORT` | Optional. Defaults to **5432** for `postgresql`, **3306** for `mysql`. | +| `DATABASE_USER` | Username. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_PASSWORD` | Password. Required when backend is `mysql` or `postgresql` and `DATABASE_URL` is unset. | +| `DATABASE_SCHEMA` | Database name (MySQL) or PostgreSQL database name (same convention as MySQL). Use alphanumeric and underscore only for PostgreSQL when the app must `CREATE DATABASE` at bootstrap. | +| `DATABASE_TLS_ENABLED` | Optional TLS toggle (`true`/`false`). Defaults to enabled outside local dev. | +| `DATABASE_SSL_CA_PATH` | Optional CA bundle path when TLS is enabled. If unset, the app uses the first existing file among common OS locations (Amazon Linux, Debian, Alpine); PostgreSQL omits `sslrootcert` when none exist so libpq uses the system trust store. | + +**SQLite (forks / local):** Set `DATABASE_BACKEND=sqlite` and `DATABASE_URL=sqlite:///path/to/file.db`. 
Single-writer; suitable for small teams and dev. + +**MySQL (default):** Set `DATABASE_BACKEND=mysql` (or rely on the default) and either `DATABASE_URL` (`mysql+pymysql://...`) or the four host/user/password/schema vars. The AWS SAM template parameter `DatabaseEngine=mysql` (default) matches this backend. + +**PostgreSQL:** Set `DATABASE_BACKEND=postgresql` and either `DATABASE_URL` (`postgresql+psycopg2://...`) or `DATABASE_HOST`, `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA`. Deploy-time bootstrap credentials (e.g. `ExistingDatabaseAdmin*` in AWS) are used only for one-time setup; the app reads `DATABASE_USER`, `DATABASE_PASSWORD`, `DATABASE_SCHEMA` at runtime. + +### Required in production (non–local) + +| Variable | Description | +|----------|-------------| +| `SLACK_SIGNING_SECRET` | Slack request verification (Basic Information → App Credentials). | +| `SLACK_CLIENT_ID` | Slack OAuth client ID. | +| `SLACK_CLIENT_SECRET` | Slack OAuth client secret. | +| `SLACK_BOT_SCOPES` | Comma-separated OAuth **bot** scopes. Must match `slack-manifest.json` `oauth_config.scopes.bot` and `syncbot/slack_manifest_scopes.py` `BOT_SCOPES`. | +| `SLACK_USER_SCOPES` | Comma-separated OAuth **user** scopes. Must match `oauth_config.scopes.user` and `syncbot/slack_manifest_scopes.py` `USER_SCOPES`. If this env requests scopes that are not declared on the Slack app, install fails with `invalid_scope`. | +| `TOKEN_ENCRYPTION_KEY` | **Required** in production; must be a strong, random value (e.g. 16+ characters). Providers may auto-generate it (e.g. AWS Secrets Manager). Back up the key after first deploy. In local dev you may set it manually or leave unset. 
| + +**Reference wiring:** AWS SAM ([`infra/aws/template.yaml`](../infra/aws/template.yaml)) maps CloudFormation parameters to Lambda env: **`SlackOauthBotScopes`** / **`SlackOauthUserScopes`** → **`SLACK_BOT_SCOPES`** / **`SLACK_USER_SCOPES`** (defaults match `BOT_SCOPES` / `USER_SCOPES`); **`LogLevel`** → **`LOG_LEVEL`**; **`RequireAdmin`** → **`REQUIRE_ADMIN`**; **`SoftDeleteRetentionDays`** → **`SOFT_DELETE_RETENTION_DAYS`**; **`SyncbotFederationEnabled`**, **`SyncbotInstanceId`**, **`SyncbotPublicUrl`** (optional override) → federation env vars; **`PrimaryWorkspace`** → **`PRIMARY_WORKSPACE`**; **`EnableDbReset`** → **`ENABLE_DB_RESET`** (boolean `true` when enabled); optional **`DatabaseTlsEnabled`** / **`DatabaseSslCaPath`** → **`DATABASE_TLS_ENABLED`** / **`DATABASE_SSL_CA_PATH`** (omit when empty so app defaults apply). **`SYNCBOT_PUBLIC_URL`** defaults to the API Gateway stage base URL unless **`SyncbotPublicUrl`** is set; stack output **`SyncBotPublicBaseUrl`** documents that base. GCP Terraform uses **`secret_slack_bot_scopes`** (Secret Manager → `SLACK_BOT_SCOPES`) and variables **`slack_user_scopes`**, **`log_level`**, **`require_admin`**, **`database_backend`**, **`database_port`**, **`soft_delete_retention_days`**, **`syncbot_federation_enabled`**, **`syncbot_instance_id`**, **`syncbot_public_url_override`**, **`primary_workspace`**, **`enable_db_reset`**, **`database_tls_enabled`**, **`database_ssl_ca_path`** for the corresponding runtime env on Cloud Run (see [infra/gcp/README.md](../infra/gcp/README.md)); **`syncbot_public_url_override`** is empty by default—set it to your service’s public HTTPS base (e.g. after first deploy) if you need **`SYNCBOT_PUBLIC_URL`** for federation. + +### Optional + +| Variable | Description | +|----------|-------------| +| `SLACK_BOT_TOKEN` | Set by OAuth flow; placeholder until first install. | +| `REQUIRE_ADMIN` | `true` (default) or `false`; restricts config to admins/owners. 
| +| `PRIMARY_WORKSPACE` | Slack Team ID of the primary workspace. Required for backup/restore to be visible. DB reset (if enabled) is also scoped to this workspace. | +| `ENABLE_DB_RESET` | When `true` / `1` / `yes` and `PRIMARY_WORKSPACE` matches the current workspace, shows the Reset Database button. Not prompted during deploy; set manually via infra config or GitHub Actions variable. | +| `LOCAL_DEVELOPMENT` | `true` only for local dev; disables token verification and enables dev shortcuts. | +| `LOG_LEVEL` | `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` (default `INFO`). | +| `PORT` | HTTP listen port for container entrypoint (`python app.py` / Cloud Run). Cloud Run injects this (typically `8080`); default `3000` when unset. | +| `SOFT_DELETE_RETENTION_DAYS` | Days to retain soft-deleted workspace data (default `30`). | +| `SYNCBOT_FEDERATION_ENABLED` | `true` to enable external connections (federation). | +| `SYNCBOT_INSTANCE_ID` | UUID for this instance (optional; can be auto-generated). | +| `SYNCBOT_PUBLIC_URL` | Public base URL of the app (required when federation is enabled). | + +## Platform Capabilities + +The provider must deliver: + +1. **Public HTTPS endpoint** + Slack sends events and interactivity to a single base URL. The app expects: + - `POST /slack/events` — events and actions + - `GET /slack/install` — OAuth start + - `GET /slack/oauth_redirect` — OAuth callback + - `GET /health` — liveness (JSON `{"status":"ok"}`) for keep-warm probes + Any path under `/api/federation` is used for federation when enabled. + +2. **Secret injection** + Slack and DB credentials must be available as environment variables (or equivalent) at process start. No assumption of a specific secret store; provider chooses (e.g. Lambda env, Secret Manager, Parameter Store). + +3. **Database** + **PostgreSQL / MySQL:** In non–local environments the app uses TLS by default; allow outbound TCP to the DB host (typically **5432** for PostgreSQL, **3306** for MySQL). 
**SQLite:** No network; the app uses a local file. Single-writer; ensure backups and file durability for production use. + +4. **Keep-warm / scheduled ping (optional but recommended)** + To avoid cold-start latency, the app supports a periodic HTTP GET to a configurable path. The provider should support a scheduled job (e.g. CloudWatch Events, Cloud Scheduler) that hits the service on an interval (e.g. 5 minutes). + +5. **Stateless execution** + The app is stateless; state lives in the configured database (PostgreSQL, MySQL, or SQLite). Horizontal scaling is supported with PostgreSQL/MySQL as long as all instances share the same DB and env; SQLite is single-writer. + +## CI Auth Model + +- **Preferred:** Short-lived federation (e.g. OIDC for AWS, Workload Identity Federation for GCP). No long-lived API keys in GitHub Secrets for deploy. +- **Bootstrap:** One-time creation of a deploy role (or service account) with least-privilege permissions for deploying the app and its resources. +- **Outputs:** Bootstrap should expose values needed for CI (see below) so users can plug them into GitHub variables. + +## Bootstrap Output Contract + +After running provider-specific bootstrap (e.g. AWS CloudFormation bootstrap stack, GCP Terraform), the following outputs should be available so users can configure GitHub Actions and/or local deploy: + +| Output key | Description | Typical use | +|------------|-------------|-------------| +| `deploy_role` | ARN or identifier of the role/identity that CI (or local) uses to deploy | GitHub variable for OIDC/WIF role-to-assume | +| `artifact_bucket` (or equivalent) | Bucket or registry where deploy artifacts (packages, images) are stored | GitHub variable; deploy step uploads here | +| `region` | Primary region for the deployment | GitHub variable (e.g. 
2. Add or replace contents of `infra/<provider>/` with templates/scripts that satisfy the contract above.
   - To integrate with the repo-level launcher (`./deploy.sh` and `.\deploy.ps1`), provide `infra/<provider>/scripts/deploy.sh` only. On Windows, `deploy.ps1` invokes that bash script via Git Bash or WSL; do not add a separate `deploy.ps1` under `infra/<provider>/`.
Upstream PRs should include only provider-neutral app changes unless a provider-specific file is explicitly being updated. diff --git a/docs/USER_GUIDE.md b/docs/USER_GUIDE.md new file mode 100644 index 0000000..1bc266d --- /dev/null +++ b/docs/USER_GUIDE.md @@ -0,0 +1,74 @@ +# SyncBot User Guide + +This guide is for **workspace admins and end users** configuring SyncBot in Slack. For **installing or hosting** the app (AWS, GCP, Docker, GitHub Actions), see **[DEPLOYMENT.md](DEPLOYMENT.md)** and the root **[README](../README.md)**. + +## Getting Started + +1. Click the install link from a desktop browser (make sure you've selected the correct workspace in the upper right) +2. Open the **SyncBot** app from the sidebar and click the **Home** tab (requires workspace admin or owner) +3. The Home tab shows everything in one view: + - **SyncBot Configuration (bottom row)** — **Refresh** and **Backup/Restore** (full-instance backup download and restore from JSON) + - **Workspace Groups** — create or join groups of workspaces that can sync channels together + - **Per-group sections** — for each group you can publish channels, manage user mapping (dedicated Home tab screen), and see/manage channel syncs inline + - **Synced Channels** — each row shows the local channel and workspace list in brackets (e.g. 
_[Any: Your Workspace, Other Workspace]_), with pause/resume and stop controls, synced-since date, and tracked message count + - **External Connections** *(when federation is enabled)* — Generate/Enter Connection Code and **Data Migration** (export workspace data for migration to another instance, or import a migration file) + +## Things to Know + +- Only workspace **admins and owners** can configure syncs (set `REQUIRE_ADMIN=false` to allow all users) +- Messages, threads, edits, deletes, reactions, images, videos, and GIFs are all synced +- **@mentions and #channel links** in synced messages are rewritten per target workspace: mapped users are tagged with the local Slack user, and channels that are part of the same sync are shown as native local channel links; otherwise users fall back to a code-style label and channels use a link back to the source workspace (or a code-style label if that cannot be built) +- Messages from other bots are synced; only SyncBot's own messages are filtered to prevent loops +- Existing messages are not back-filled; syncing starts from the moment a channel is linked +- Do not add SyncBot manually to channels. SyncBot adds itself when you configure a Sync. If it detects it was added to an unconfigured channel it will post a message and leave automatically +- Both public and private channels are supported + +## Workspace Groups + +Workspaces must belong to the same **group** before they can sync channels or map users. Admins can create a new group (which generates an invite code) or join an existing group by entering a code. A workspace can be in multiple groups with different combinations of other workspaces. + +## Sync Modes + +When publishing a channel inside a group, admins choose either **1-to-1** (only a specific workspace can subscribe) or **group-wide** (any group member can subscribe independently). 
+ +## Pause / Resume / Stop + +- **Pause/Resume** — Individual channel syncs can be paused and resumed without losing configuration. Paused channels do not sync any messages, threads, or reactions. +- **Selective Stop** — When a workspace stops syncing a channel, only that workspace's history is removed. Other workspaces continue syncing uninterrupted. The published channel remains available until the original publisher unpublishes it. + +## Uninstall / Reinstall + +If a workspace uninstalls SyncBot, group memberships and syncs are paused (not deleted). Reinstalling within the retention period (default 30 days, configurable via `SOFT_DELETE_RETENTION_DAYS`) automatically restores everything. Group members are notified via DMs and channel messages. + +## User Mapping + +Users are automatically mapped across workspaces by email or display name. Admins can manually edit mappings via the User Mapping screen (scoped per group). Remote users are displayed as "Display Name (Workspace Name)" and sorted by normalized name. In synced messages, a mapped user is mentioned with a normal `@` tag in the receiving workspace; unmapped users appear as a code-style `[@Name (Workspace)]` label. Channel names that point at another synced channel in the same sync group are shown as native `#channel` links in each workspace. + +## Refresh Behavior + +The Home tab and User Mapping screens have Refresh buttons. To keep API usage low, repeated clicks with no data changes are handled lightly: a 60-second cooldown applies, and when nothing has changed the app reuses cached content and shows "No new data. Wait __ seconds before refreshing again." + +## Media Sync + +Images and videos are downloaded from the source and uploaded directly to each target channel. GIFs from the Slack GIF picker or GIPHY are synced as image blocks. 
+ +| Source message | What appears in target workspace | +|---|---| +| Text only | Single message with text, shown under the original poster's name and avatar | +| GIF (Slack picker / GIPHY) | Single message with the GIF embedded inline via image block, under the poster's name | +| GIF + text | Single message with text and GIF together, under the poster's name | +| Photo or video only (no text) | Single file upload with `Shared by @User` (tagged if mapped, plain name otherwise) | +| Text + photo or video | Text message under the poster's name, then the file in a thread reply with `Shared by @User in this message` linking back to the text | +| Multiple files | Same as above; all files are uploaded together in a single thread reply | + +## External Connections + +*(Opt-in — set `SYNCBOT_FEDERATION_ENABLED=true` and `SYNCBOT_PUBLIC_URL` to enable)* + +Workspaces running their own SyncBot deployment can be connected via the "External Connections" section on the Home tab. One admin generates a connection code and shares it out-of-band; the other admin enters it. Messages, edits, deletes, reactions, and user matching work across instances. The receiving SyncBot instance rewrites `@` mentions and `#` channel links using the same rules as same-instance sync (native tags when mapped / synced, fallbacks otherwise). + +**Data Migration** in the same section lets you export your workspace data (syncs, channels, post meta, user directory, user mappings) for moving to another instance, or import a migration file after connecting. See [Backup and Migration](BACKUP_AND_MIGRATION.md) for details. + +## Backup / Restore + +Use **Backup/Restore** on the Home tab to download a full-instance backup (all tables as JSON) or restore from a backup file. Intended for disaster recovery (e.g. before rebuilding AWS). See [Backup and Migration](BACKUP_AND_MIGRATION.md) for details. 
diff --git a/infra/aws/db_setup/Makefile b/infra/aws/db_setup/Makefile new file mode 100644 index 0000000..ca40881 --- /dev/null +++ b/infra/aws/db_setup/Makefile @@ -0,0 +1,6 @@ +# SAM build: copy handler and install dependencies so pymysql is in the deployment package. +build-DbSetupFunction: + cp handler.py $(ARTIFACTS_DIR)/ + cp requirements.txt $(ARTIFACTS_DIR)/ + python -m pip install -r requirements.txt -t $(ARTIFACTS_DIR) --quiet + rm -rf $(ARTIFACTS_DIR)/bin diff --git a/infra/aws/db_setup/handler.py b/infra/aws/db_setup/handler.py new file mode 100644 index 0000000..f72a328 --- /dev/null +++ b/infra/aws/db_setup/handler.py @@ -0,0 +1,333 @@ +""" +Custom CloudFormation resource: create database and app user for SyncBot. + +Supports MySQL (port 3306) and PostgreSQL (port 5432). It can use: +- explicit admin password (existing-host mode), or +- admin password fetched from an admin secret ARN (new-RDS mode). +""" + +import json +import re +import base64 +import time +import socket + +import boto3 +import psycopg2 +import pymysql +from psycopg2 import sql as psql +from pymysql.cursors import DictCursor + +DB_CONNECT_TIMEOUT_SECONDS = 5 +DB_CONNECT_ATTEMPTS = 6 +DB_CONNECT_RETRY_SECONDS = 2 +POSTGRES_DB_CONNECT_ATTEMPTS = 5 +POSTGRES_DB_CONNECT_RETRY_SECONDS = 1 + + +# CloudFormation custom resource response helper (no cfnresponse in Lambda by default for Python 3) +def send(event, context, status, data=None, reason=None, physical_resource_id=None): + import urllib.error + import urllib.request + + pid = physical_resource_id or event.get("PhysicalResourceId") or event["LogicalResourceId"] + log_ref = getattr(context, "log_stream_name", None) or "n/a" + body = json.dumps( + { + "Status": status, + "Reason": reason or f"See CloudWatch Log Stream: {log_ref}", + "PhysicalResourceId": pid, + "StackId": event["StackId"], + "RequestId": event["RequestId"], + "LogicalResourceId": event["LogicalResourceId"], + "Data": data or {}, + } + ).encode("utf-8") + req = 
urllib.request.Request( + event["ResponseURL"], + data=body, + method="PUT", + headers={"Content-Type": "application/json"}, + ) + # Custom resource responses must reach CloudFormation or the stack hangs (delete/update failures). + try: + with urllib.request.urlopen(req, timeout=60) as f: + f.read() + except urllib.error.HTTPError as e: + raise RuntimeError(f"CFN response HTTP {e.code}: {e.read()!r}") from e + except urllib.error.URLError as e: + raise RuntimeError(f"CFN response URL error: {e}") from e + + +def handler(event, context): + try: + return _handler_impl(event, context) + except Exception as e: + try: + send(event, context, "FAILED", reason=f"Unhandled error: {e}") + except Exception as send_err: + raise RuntimeError( + f"Unhandled error in handler: {e}; failed to notify CloudFormation: {send_err}" + ) from e + raise + + +def _safe_ident(name: str) -> str: + if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", name): + raise ValueError(f"Invalid identifier: {name}") + return name + + +def _handler_impl(event, context): + request_type = event.get("RequestType", "Create") + props = event.get("ResourceProperties", {}) + host = props.get("Host", "").strip() + admin_user = (props.get("AdminUser") or "").strip() + admin_password = props.get("AdminPassword") or "" + admin_secret_arn = (props.get("AdminSecretArn") or "").strip() + schema = (props.get("Schema") or "syncbot").strip() + stage = (props.get("Stage") or "test").strip() + secret_arn = (props.get("SecretArn") or "").strip() + database_engine = (props.get("DatabaseEngine") or "mysql").strip().lower() + + if request_type == "Delete": + # Must return the same PhysicalResourceId as Create; never use a placeholder. 
+ delete_pid = event.get("PhysicalResourceId") or event["LogicalResourceId"] + send(event, context, "SUCCESS", {"Username": ""}, physical_resource_id=delete_pid) + return + + if not all([host, admin_user, schema, stage, secret_arn]): + send( + event, + context, + "FAILED", + reason="Missing Host, AdminUser, Schema, Stage, or SecretArn", + ) + return + if not admin_password and not admin_secret_arn: + send( + event, + context, + "FAILED", + reason="Missing admin credentials: set AdminPassword or AdminSecretArn", + ) + return + + app_username = f"syncbot_user_{stage}".replace("-", "_") + try: + app_password = get_secret_value(secret_arn) + except Exception as e: + send(event, context, "FAILED", reason=f"GetSecretValue failed: {e}") + return + if not admin_password: + try: + # RDS-managed master-user secrets store JSON; extract the password field. + admin_password = get_secret_value(admin_secret_arn, json_key="password") + except Exception as e: + send(event, context, "FAILED", reason=f"Get admin secret failed: {e}") + return + + try: + # Fail fast on obvious network connectivity issues before opening DB client sessions. 
+ _assert_tcp_reachable(host, 3306 if database_engine == "mysql" else 5432) + if database_engine == "mysql": + setup_database_mysql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + ) + else: + setup_database_postgresql( + host=host, + admin_user=admin_user, + admin_password=admin_password, + schema=schema, + app_username=app_username, + app_password=app_password, + ) + except Exception as e: + send(event, context, "FAILED", reason=f"Database setup failed: {e}") + return + + send(event, context, "SUCCESS", {"Username": app_username}, reason="OK", physical_resource_id=app_username) + return {"Username": app_username} + + +def _assert_tcp_reachable(host: str, port: int) -> None: + last_exc = None + for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(DB_CONNECT_TIMEOUT_SECONDS) + try: + sock.connect((host, port)) + return + except Exception as exc: + last_exc = exc + time.sleep(DB_CONNECT_RETRY_SECONDS) + finally: + sock.close() + raise RuntimeError( + f"Cannot reach {host}:{port} over TCP after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" + ) + + +def get_secret_value(secret_arn: str, json_key: str | None = None) -> str: + client = boto3.client("secretsmanager") + resp = client.get_secret_value(SecretId=secret_arn) + secret_string = resp.get("SecretString") + if secret_string is None: + secret_binary = resp.get("SecretBinary") + if secret_binary is not None: + secret_string = base64.b64decode(secret_binary).decode("utf-8") + secret_string = (secret_string or "").strip() + if not secret_string: + raise ValueError(f"Secret {secret_arn} is empty") + + if json_key: + try: + payload = json.loads(secret_string) + except json.JSONDecodeError as exc: + raise ValueError(f"Secret {secret_arn} is not JSON; cannot read key '{json_key}'") from exc + value = (payload.get(json_key) or "").strip() if 
isinstance(payload, dict) else "" + if not value: + raise ValueError(f"Secret {secret_arn} missing key '{json_key}'") + return value + + return secret_string + + +def setup_database_mysql( + *, + host: str, + admin_user: str, + admin_password: str, + schema: str, + app_username: str, + app_password: str, +) -> None: + safe_schema = _safe_ident(schema) + _safe_ident(app_username) + conn = None + last_exc = None + for _attempt in range(1, DB_CONNECT_ATTEMPTS + 1): + try: + conn = pymysql.connect( + host=host, + user=admin_user, + password=admin_password, + port=3306, + charset="utf8mb4", + cursorclass=DictCursor, + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + ) + break + except Exception as exc: + last_exc = exc + time.sleep(DB_CONNECT_RETRY_SECONDS) + if conn is None: + raise RuntimeError( + f"MySQL connect failed after {DB_CONNECT_ATTEMPTS} attempts: {last_exc}" + ) + try: + with conn.cursor() as cur: + cur.execute(f"CREATE DATABASE IF NOT EXISTS `{safe_schema}`") + cur.execute( + "CREATE USER IF NOT EXISTS %s@'%%' IDENTIFIED BY %s", + (app_username, app_password), + ) + cur.execute(f"GRANT ALL PRIVILEGES ON `{safe_schema}`.* TO %s@'%%'", (app_username,)) + cur.execute("FLUSH PRIVILEGES") + conn.commit() + finally: + conn.close() + + +def setup_database_postgresql( + *, + host: str, + admin_user: str, + admin_password: str, + schema: str, + app_username: str, + app_password: str, +) -> None: + max_db_connect_attempts = POSTGRES_DB_CONNECT_ATTEMPTS + db_connect_retry_seconds = POSTGRES_DB_CONNECT_RETRY_SECONDS + _safe_ident(schema) + _safe_ident(app_username) + conn = psycopg2.connect( + host=host, + user=admin_user, + password=admin_password, + port=5432, + dbname="postgres", + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + sslmode="require", + ) + conn.autocommit = True + try: + with conn.cursor() as cur: + cur.execute("SELECT 1 FROM pg_roles WHERE rolname = %s", (app_username,)) + if cur.fetchone() is None: + q = psql.SQL("CREATE ROLE {name} WITH LOGIN PASSWORD 
%s").format( + name=psql.Identifier(app_username), + ) + cur.execute(q, (app_password,)) + else: + q = psql.SQL("ALTER ROLE {name} WITH LOGIN PASSWORD %s").format( + name=psql.Identifier(app_username), + ) + cur.execute(q, (app_password,)) + + cur.execute("SELECT 1 FROM pg_database WHERE datname = %s", (schema,)) + if cur.fetchone() is None: + cur.execute( + psql.SQL("CREATE DATABASE {db} OWNER {owner}").format( + db=psql.Identifier(schema), + owner=psql.Identifier(app_username), + ) + ) + finally: + conn.close() + + # Ensure runtime role can connect and run migrations in the target DB. + # After CREATE DATABASE, RDS can take a short time before accepting connections. + last_exc = None + for _attempt in range(1, max_db_connect_attempts + 1): + try: + db_conn = psycopg2.connect( + host=host, + user=admin_user, + password=admin_password, + port=5432, + dbname=schema, + connect_timeout=DB_CONNECT_TIMEOUT_SECONDS, + sslmode="require", + ) + db_conn.autocommit = True + try: + with db_conn.cursor() as cur: + cur.execute( + psql.SQL("GRANT CONNECT, TEMP ON DATABASE {db} TO {user}").format( + db=psql.Identifier(schema), + user=psql.Identifier(app_username), + ) + ) + cur.execute( + psql.SQL("GRANT USAGE, CREATE ON SCHEMA public TO {user}").format( + user=psql.Identifier(app_username), + ) + ) + finally: + db_conn.close() + return + except Exception as exc: + last_exc = exc + time.sleep(db_connect_retry_seconds) + raise RuntimeError( + f"Failed connecting to newly created database '{schema}' after " + f"{max_db_connect_attempts} attempts: {last_exc}" + ) diff --git a/infra/aws/db_setup/requirements.txt b/infra/aws/db_setup/requirements.txt new file mode 100644 index 0000000..a1c6e6f --- /dev/null +++ b/infra/aws/db_setup/requirements.txt @@ -0,0 +1,4 @@ +pymysql==1.1.2 +psycopg2-binary==2.9.11 +# Required for MySQL 8+ caching_sha2_password; pin for reproducible CI (pip-audit / sam build). 
+cryptography==46.0.5 diff --git a/infra/aws/scripts/deploy.sh b/infra/aws/scripts/deploy.sh new file mode 100755 index 0000000..0380d76 --- /dev/null +++ b/infra/aws/scripts/deploy.sh @@ -0,0 +1,1837 @@ +#!/usr/bin/env bash +# Interactive AWS deploy helper for SyncBot. +# Handles: bootstrap (optional), sam build, sam deploy (new RDS or existing RDS). +# +# Run from repo root: +# ./infra/aws/scripts/deploy.sh +# +# Phases (main path, after functions are defined below): +# 1) Prerequisites: CLI checks, template paths +# 2) Authentication: AWS region and credentials +# 3) Bootstrap probe: read bootstrap stack outputs (create/sync runs only if task 1 selected) +# 4) Stack identity: stage, app stack name; detect existing stack for update +# 5) Deploy Tasks: multi-select menu (bootstrap, build/deploy, CI/CD, Slack API, backup secrets) +# 6) Configuration (if build/deploy): database, Slack creds, SAM build + deploy +# 7) Post-tasks: Slack manifest/API, GitHub Actions, deploy receipt, DR secret backup + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" + +BOOTSTRAP_TEMPLATE="$REPO_ROOT/infra/aws/template.bootstrap.yaml" +APP_TEMPLATE="$REPO_ROOT/infra/aws/template.yaml" +SLACK_MANIFEST_GENERATED_PATH="" +APP_DB_PASSWORD_OVERRIDE="${APP_DB_PASSWORD_OVERRIDE:-}" +APP_DB_PASSWORD_REUSED_FROM_SECRET="" +SLACK_SIGNING_SECRET_SOURCE="" +SLACK_CLIENT_SECRET_SOURCE="" +EXISTING_DB_ADMIN_PASSWORD_SOURCE="" +# Populated before write_deploy_receipt: backup summary + markdown receipt (deploy-receipts/*.md). 
+RECEIPT_TOKEN_SECRET_ID="" +RECEIPT_APP_DB_SECRET_NAME="" + +# shellcheck source=/dev/null +source "$REPO_ROOT/deploy.sh" + +prompt_default() { + local prompt="$1" + local default="$2" + local value + read -r -p "$prompt [$default]: " value + if [[ -z "$value" ]]; then + value="$default" + fi + echo "$value" +} + +prompt_secret() { + local prompt="$1" + local value + read -r -s -p "$prompt: " value + # Keep the visual newline on the terminal even when called via $(...). + printf '\n' >&2 + echo "$value" +} + +prompt_required() { + local prompt="$1" + local value + while true; do + read -r -p "$prompt: " value + if [[ -n "$value" ]]; then + echo "$value" + return 0 + fi + echo "Error: $prompt is required." >&2 + done +} + +prompt_secret_required() { + local prompt="$1" + local value + while true; do + value="$(prompt_secret "$prompt")" + if [[ -n "$value" ]]; then + echo "$value" + return 0 + fi + echo "Error: $prompt is required." >&2 + done +} + +required_from_env_or_prompt() { + local env_name="$1" + local prompt="$2" + local mode="${3:-plain}" # plain|secret + local env_value="${!env_name:-}" + if [[ -n "$env_value" ]]; then + echo "Using $prompt from environment variable $env_name." 
>&2 + echo "$env_value" + return 0 + fi + if [[ "$mode" == "secret" ]]; then + prompt_secret_required "$prompt" + else + prompt_required "$prompt" + fi +} + +prompt_yes_no() { + local prompt="$1" + local default="${2:-y}" + local answer + local shown="y/N" + [[ "$default" == "y" ]] && shown="Y/n" + read -r -p "$prompt [$shown]: " answer + if [[ -z "$answer" ]]; then + answer="$default" + fi + [[ "$answer" =~ ^[Yy]$ ]] +} + +ensure_aws_authenticated() { + local profile active_profile sso_start_url sso_region + profile="${AWS_PROFILE:-}" + active_profile="$profile" + if [[ -z "$active_profile" ]]; then + active_profile="$(aws configure get profile 2>/dev/null || true)" + [[ -z "$active_profile" ]] && active_profile="default" + fi + + if aws sts get-caller-identity >/dev/null 2>&1; then + return 0 + fi + + sso_start_url="$(aws configure get sso_start_url --profile "$active_profile" 2>/dev/null || true)" + sso_region="$(aws configure get sso_region --profile "$active_profile" 2>/dev/null || true)" + + echo "AWS CLI is not authenticated." + if [[ -n "$sso_start_url" && -n "$sso_region" ]]; then + if prompt_yes_no "Run 'aws sso login --profile $active_profile' now?" "y"; then + aws sso login --profile "$active_profile" || true + fi + else + echo "No complete SSO config found for profile '$active_profile'." + # Prefer the user's default interactive AWS login flow when available. + if aws login help >/dev/null 2>&1; then + if prompt_yes_no "Run 'aws login' now?" "y"; then + aws login || true + fi + fi + + if ! aws sts get-caller-identity >/dev/null 2>&1; then + if prompt_yes_no "Run 'aws configure sso --profile $active_profile' now?" "n"; then + aws configure sso --profile "$active_profile" || true + if prompt_yes_no "Run 'aws sso login --profile $active_profile' now?" "y"; then + aws sso login --profile "$active_profile" || true + fi + else + echo "Tip: use 'aws configure' if you authenticate with access keys." + fi + fi + fi + + if ! 
aws sts get-caller-identity >/dev/null 2>&1; then + echo "Unable to authenticate AWS CLI." + echo "Run one of the following, then rerun deploy:" + echo " aws login" + echo " aws configure sso [--profile ]" + echo " aws sso login [--profile ]" + echo " aws configure" + exit 1 + fi +} + +ensure_gh_authenticated() { + if ! command -v gh >/dev/null 2>&1; then + prereqs_hint_gh_cli >&2 + return 1 + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh CLI is not authenticated." + if prompt_yes_no "Run 'gh auth login' now?" "y"; then + gh auth login || true + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh authentication is still missing. Skipping automatic GitHub setup." + return 1 +} + +slack_manifest_json_compact() { + local manifest_file="$1" + python3 - "$manifest_file" <<'PY' +import json +import sys +path = sys.argv[1] +with open(path, "r", encoding="utf-8") as f: + data = json.load(f) +print(json.dumps(data, separators=(",", ":"))) +PY +} + +slack_api_configure_from_manifest() { + local manifest_file="$1" + local install_url="$2" + local token app_id team_id manifest_json api_resp ok + + echo + echo "=== Slack App API ===" + + token="$(required_from_env_or_prompt "SLACK_API_TOKEN" "Slack API token (required scopes: apps.manifest:write)" "secret")" + app_id="$(prompt_default "Slack App ID (optional; blank = create new app)" "${SLACK_APP_ID:-}")" + team_id="$(prompt_default "Slack Team ID (optional; usually blank)" "${SLACK_TEAM_ID:-}")" + + manifest_json="$(slack_manifest_json_compact "$manifest_file" 2>/dev/null || true)" + if [[ -z "$manifest_json" ]]; then + echo "Could not parse manifest JSON automatically." + echo "Ensure $manifest_file is valid JSON and Python 3 is installed." 
+ return 0 + fi + + if [[ -n "$app_id" ]]; then + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +print("ok" if data.get("ok") else f"error:{data.get('error','unknown_error')}") +PY +)" + if [[ "$ok" == "ok" ]]; then + echo "Slack app manifest updated for App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API update failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.update" + fi + return 0 + fi + + # No App ID supplied: create a new Slack app from manifest. 
+ if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +if not data.get("ok"): + print(f"error:{data.get('error','unknown_error')}") + sys.exit(0) +app_id = data.get("app_id") or (data.get("app", {}) or {}).get("id") or "" +print(f"ok:{app_id}") +PY +)" + if [[ "$ok" == ok:* ]]; then + app_id="${ok#ok:}" + echo "Slack app created successfully." + [[ -n "$app_id" ]] && echo "New Slack App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API create failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.create" + fi +} + +bootstrap_describe_outputs() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$region" 2>/dev/null || true +} + +app_describe_outputs() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$region" 2>/dev/null || true +} + +output_value() { + local outputs="$1" + local key="$2" + echo "$outputs" | awk -F'\t' -v k="$key" '$1==k {print $2}' +} + +configure_github_actions_aws() { + # $1 Bootstrap stack outputs (tab-separated OutputKey / OutputValue) + # $2 
Bootstrap CloudFormation stack name (for OIDC drift check vs gh repo) + # $3 AWS region for this deploy session (fallback if bootstrap has no BootstrapRegion output) + # $4 App CloudFormation stack name + # $5 Stage name (test|prod) — GitHub environment name + # $6 Database schema name + # $7 DB source mode: 1 = stack-managed RDS, 2 = external or existing host (matches SAM / prompts) + # $8 Existing DB host (mode 2) + # $9 Existing DB admin user (mode 2) + # $10 Existing DB admin password (mode 2) + # $11 Existing DB network mode: public | private + # $12 Comma-separated subnet IDs for Lambda in private mode + # $13 Lambda ENI security group id in private mode + # $14 Database engine: mysql | postgresql + local bootstrap_outputs="$1" + local bootstrap_stack_name="$2" + local aws_region="$3" + local app_stack_name="$4" + local deploy_stage="$5" + local database_schema="$6" + local db_mode="$7" + local existing_db_host="$8" + local existing_db_admin_user="$9" + local existing_db_admin_password="${10}" + local existing_db_network_mode="${11:-}" + [[ -z "$existing_db_network_mode" ]] && existing_db_network_mode="public" + local existing_db_subnet_ids_csv="${12:-}" + local existing_db_lambda_sg_id="${13:-}" + local database_engine="${14:-}" + [[ -z "$database_engine" ]] && database_engine="mysql" + local role bucket boot_region + role="$(output_value "$bootstrap_outputs" "GitHubDeployRoleArn")" + bucket="$(output_value "$bootstrap_outputs" "DeploymentBucketName")" + boot_region="$(output_value "$bootstrap_outputs" "BootstrapRegion")" + [[ -z "$boot_region" ]] && boot_region="$aws_region" + local repo env_name + env_name="$deploy_stage" + + echo + echo "=== GitHub Actions (AWS) ===" + echo "Detected bootstrap role: $role" + echo "Detected deploy bucket: $bucket (SAM/CI packaging for sam deploy — not Slack or app media)" + echo "Detected bootstrap region: $boot_region" + repo="$(prompt_github_repo_for_actions "$REPO_ROOT")" + maybe_prompt_bootstrap_github_trust_update 
"$repo" "$bootstrap_stack_name" "$aws_region" + + if ! ensure_gh_authenticated; then + echo + echo "Set these GitHub Actions Variables manually (on the repo you intend):" + echo " AWS_ROLE_TO_ASSUME = $role" + echo " AWS_S3_BUCKET = $bucket (SAM deploy artifact bucket / DeploymentBucketName; not Slack file storage)" + echo " AWS_REGION = $boot_region" + echo "For environment '$env_name' also set AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DATABASE_ENGINE," + echo "and (if using existing RDS) EXISTING_DATABASE_* / private VPC vars — see docs/DEPLOYMENT.md." + return 0 + fi + + if prompt_yes_no "Create/update GitHub environments 'test' and 'prod' now?" "y"; then + gh api -X PUT "repos/$repo/environments/test" >/dev/null + gh api -X PUT "repos/$repo/environments/prod" >/dev/null + echo "GitHub environments ensured: test, prod." + fi + + if prompt_yes_no "Set repo variables with gh now (AWS_ROLE_TO_ASSUME, AWS_S3_BUCKET, AWS_REGION)? AWS_S3_BUCKET is SAM/CI packaging only (DeploymentBucketName)." "y"; then + [[ -n "$role" ]] && gh variable set AWS_ROLE_TO_ASSUME --body "$role" -R "$repo" + [[ -n "$bucket" ]] && gh variable set AWS_S3_BUCKET --body "$bucket" -R "$repo" + [[ -n "$boot_region" ]] && gh variable set AWS_REGION --body "$boot_region" -R "$repo" + echo "GitHub repository variables updated." + fi + + if prompt_yes_no "Set environment variables for '$env_name' now (AWS_STACK_NAME, STAGE_NAME, DATABASE_SCHEMA, DB host/user vars)?" 
"y"; then + gh variable set AWS_STACK_NAME --env "$env_name" --body "$app_stack_name" -R "$repo" + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" -R "$repo" + gh variable set DATABASE_SCHEMA --env "$env_name" --body "$database_schema" -R "$repo" + gh variable set DATABASE_ENGINE --env "$env_name" --body "$database_engine" -R "$repo" + if [[ "$db_mode" == "2" ]]; then + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "$existing_db_host" -R "$repo" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "$existing_db_admin_user" -R "$repo" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "$existing_db_network_mode" -R "$repo" + if [[ "$existing_db_network_mode" == "private" ]]; then + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "$existing_db_subnet_ids_csv" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "$existing_db_lambda_sg_id" -R "$repo" + else + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" + fi + else + # Clear existing-host vars for new-RDS mode to avoid stale CI config. + gh variable set EXISTING_DATABASE_HOST --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_ADMIN_USER --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_NETWORK_MODE --env "$env_name" --body "public" -R "$repo" + gh variable set EXISTING_DATABASE_SUBNET_IDS_CSV --env "$env_name" --body "" -R "$repo" + gh variable set EXISTING_DATABASE_LAMBDA_SECURITY_GROUP_ID --env "$env_name" --body "" -R "$repo" + fi + echo "Environment variables updated for '$env_name'." + fi + + if prompt_yes_no "Set environment secrets for '$env_name' now (Slack secrets + optional Existing DB admin password)?" 
"n"; then + if [[ -z "${SLACK_SIGNING_SECRET:-}" ]]; then + SLACK_SIGNING_SECRET_SOURCE="prompt" + SLACK_SIGNING_SECRET="$(required_from_env_or_prompt "SLACK_SIGNING_SECRET" "SlackSigningSecret" "secret")" + fi + if [[ -z "${SLACK_CLIENT_SECRET:-}" ]]; then + SLACK_CLIENT_SECRET_SOURCE="prompt" + SLACK_CLIENT_SECRET="$(required_from_env_or_prompt "SLACK_CLIENT_SECRET" "SlackClientSecret" "secret")" + fi + gh secret set SLACK_SIGNING_SECRET --env "$env_name" --body "$SLACK_SIGNING_SECRET" -R "$repo" + gh secret set SLACK_CLIENT_SECRET --env "$env_name" --body "$SLACK_CLIENT_SECRET" -R "$repo" + if [[ "$db_mode" == "2" && -n "$existing_db_admin_password" ]]; then + gh secret set EXISTING_DATABASE_ADMIN_PASSWORD --env "$env_name" --body "$existing_db_admin_password" -R "$repo" + fi + echo "Environment secrets updated for '$env_name'." + fi +} + +generate_stage_slack_manifest() { + local stage="$1" + local api_url="$2" + local install_url="$3" + local template="$REPO_ROOT/slack-manifest.json" + local manifest_out="$REPO_ROOT/slack-manifest_${stage}.json" + local events_url base_url oauth_redirect_url + + if [[ ! -f "$template" ]]; then + echo "Slack manifest template not found at $template" + return 0 + fi + if [[ -z "$api_url" ]]; then + echo "Could not determine API URL from stack outputs. Skipping Slack manifest generation." + return 0 + fi + + events_url="${api_url%/}" + base_url="${events_url%/slack/events}" + oauth_redirect_url="${base_url}/slack/oauth_redirect" + + if ! 
python3 - "$template" "$manifest_out" "$events_url" "$oauth_redirect_url" <<'PY' +import json +import sys + +template_path, out_path, events_url, redirect_url = sys.argv[1:5] +with open(template_path, "r", encoding="utf-8") as f: + manifest = json.load(f) + +manifest.setdefault("oauth_config", {}).setdefault("redirect_urls", []) +manifest["oauth_config"]["redirect_urls"] = [redirect_url] +manifest.setdefault("settings", {}).setdefault("event_subscriptions", {}) +manifest["settings"]["event_subscriptions"]["request_url"] = events_url +manifest.setdefault("settings", {}).setdefault("interactivity", {}) +manifest["settings"]["interactivity"]["request_url"] = events_url + +with open(out_path, "w", encoding="utf-8") as f: + json.dump(manifest, f, indent=2) + f.write("\n") +PY + then + echo "Failed to generate stage Slack manifest from JSON template." + return 0 + fi + + SLACK_MANIFEST_GENERATED_PATH="$manifest_out" + + echo "=== Slack Manifest (${stage}) ===" + echo "Saved file: $manifest_out" + echo "Install URL: $install_url" + echo + sed 's/^/ /' "$manifest_out" +} + +secret_arn_by_name() { + local secret_name="$1" + local region="$2" + aws secretsmanager describe-secret \ + --secret-id "$secret_name" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>/dev/null || true +} + +secret_value_by_id() { + local secret_id="$1" + local region="$2" + aws secretsmanager get-secret-value \ + --secret-id "$secret_id" \ + --region "$region" \ + --query 'SecretString' \ + --output text 2>/dev/null || true +} + +rds_lookup_admin_defaults() { + local db_host="$1" + local region="$2" + aws rds describe-db-instances \ + --region "$region" \ + --query "DBInstances[?Endpoint.Address=='$db_host']|[0].[MasterUsername,MasterUserSecret.SecretArn]" \ + --output text 2>/dev/null || true +} + +secret_password_by_id() { + local secret_id="$1" + local region="$2" + local raw + raw="$(secret_value_by_id "$secret_id" "$region")" + if [[ -z "$raw" || "$raw" == "None" ]]; then + return 1 + 
fi + python3 - "$raw" <<'PY' +import json +import sys + +raw = sys.argv[1] +if not raw or raw == "None": + print("") + raise SystemExit(0) + +try: + data = json.loads(raw) +except Exception: + print(raw) + raise SystemExit(0) + +if isinstance(data, dict): + password = data.get("password") + if isinstance(password, str) and password: + print(password) + else: + print("") +else: + print("") +PY +} + +wait_for_secret_deleted() { + local secret_id="$1" + local region="$2" + local max_attempts="${3:-20}" + local sleep_seconds="${4:-3}" + local attempt + for ((attempt = 1; attempt <= max_attempts; attempt++)); do + if ! aws secretsmanager describe-secret --secret-id "$secret_id" --region "$region" >/dev/null 2>&1; then + return 0 + fi + sleep "$sleep_seconds" + done + return 1 +} + +handle_orphan_app_db_secret_on_create() { + local stack_status="$1" + local secret_name="$2" + local region="$3" + local secret_arn reuse_value + + # Only needed for brand-new stack creates where a previous failed stack left the named secret. + if [[ -n "$stack_status" && "$stack_status" != "None" ]]; then + return 0 + fi + + secret_arn="$(secret_arn_by_name "$secret_name" "$region")" + if [[ -z "$secret_arn" || "$secret_arn" == "None" ]]; then + return 0 + fi + + echo "Detected existing app DB secret: $secret_name" + if [[ -z "$APP_DB_PASSWORD_OVERRIDE" ]]; then + if prompt_yes_no "Reuse existing app DB password value when recreating this secret?" "y"; then + reuse_value="$(secret_password_by_id "$secret_arn" "$region" 2>/dev/null || true)" + if [[ -n "$reuse_value" && "$reuse_value" != "None" ]]; then + APP_DB_PASSWORD_OVERRIDE="$reuse_value" + APP_DB_PASSWORD_REUSED_FROM_SECRET="$secret_name" + echo "Will reuse existing app DB password value." + else + echo "Could not read existing app DB secret value; deploy will create a new app DB password." + fi + fi + else + echo "Using provided AppDbPasswordOverride for secret recreation." 
+ [[ -z "$APP_DB_PASSWORD_REUSED_FROM_SECRET" ]] && APP_DB_PASSWORD_REUSED_FROM_SECRET="provided-override" + fi + + if ! prompt_yes_no "Delete detected secret now so create can continue?" "y"; then + echo "Cannot create new stack while this secret name already exists." >&2 + echo "Delete it manually or choose a different stage/stack." >&2 + exit 1 + fi + + if ! aws secretsmanager delete-secret \ + --secret-id "$secret_arn" \ + --region "$region" \ + --force-delete-without-recovery >/dev/null 2>&1; then + echo "Failed to delete secret '$secret_name'. Check IAM permissions and retry." >&2 + exit 1 + fi + + echo "Deleted secret '$secret_name'. Waiting for name to become available..." + if ! wait_for_secret_deleted "$secret_arn" "$region"; then + echo "Secret deletion is still propagating. Wait a minute and rerun deploy." >&2 + exit 1 + fi +} + +write_deploy_receipt() { + local provider="$1" + local stage="$2" + local project_or_stack="$3" + local region="$4" + local service_url="$5" + local install_url="$6" + local manifest_path="$7" + local ts_human ts_file receipt_dir receipt_path + + ts_human="$(date -u +"%Y-%m-%d %H:%M:%S UTC")" + ts_file="$(date -u +"%Y%m%dT%H%M%SZ")" + receipt_dir="$REPO_ROOT/deploy-receipts" + receipt_path="$receipt_dir/deploy-${provider}-${stage}-${ts_file}.md" + + mkdir -p "$receipt_dir" + cat >"$receipt_path" <&1 || true)" + if [[ "$describe_out" == *"AccessDenied"* || "$describe_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing DescribeSecret on '$current_secret_id'." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." >&2 + exit 1 + fi + + get_out="$(aws secretsmanager get-secret-value \ + --secret-id "$current_secret_id" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>&1 || true)" + if [[ "$get_out" == *"AccessDenied"* || "$get_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing GetSecretValue on '$current_secret_id'." 
>&2 + echo "This commonly breaks CloudFormation when Lambda environment uses dynamic secret references." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." >&2 + exit 1 + fi + done + + # If explicitly reusing an ARN, validate direct access too. + if [[ -n "$existing_token_secret_arn" ]]; then + get_out="$(aws secretsmanager get-secret-value \ + --secret-id "$existing_token_secret_arn" \ + --region "$region" \ + --query 'ARN' \ + --output text 2>&1 || true)" + if [[ "$get_out" == *"AccessDenied"* || "$get_out" == *"not authorized"* ]]; then + echo "Secrets Manager preflight failed: missing GetSecretValue on '$existing_token_secret_arn'." >&2 + echo "Fix: re-deploy bootstrap stack to update syncbot deploy policy, then retry." >&2 + exit 1 + fi + fi + + echo "Secrets Manager preflight passed." +} + +rds_lookup_network_defaults() { + local db_host="$1" + local region="$2" + aws rds describe-db-instances \ + --region "$region" \ + --query "DBInstances[?Endpoint.Address=='$db_host']|[0].[PubliclyAccessible,join(',',DBSubnetGroup.Subnets[].SubnetIdentifier),join(',',VpcSecurityGroups[].VpcSecurityGroupId),DBSubnetGroup.VpcId,DBInstanceIdentifier]" \ + --output text 2>/dev/null || true +} + +ec2_subnet_vpc_ids() { + local region="$1" + shift + aws ec2 describe-subnets \ + --region "$region" \ + --subnet-ids "$@" \ + --query 'Subnets[*].[SubnetId,VpcId]' \ + --output text 2>/dev/null || true +} + +ec2_vpc_subnet_ids() { + local vpc_id="$1" + local region="$2" + aws ec2 describe-subnets \ + --region "$region" \ + --filters "Name=vpc-id,Values=$vpc_id" \ + --query 'Subnets[].SubnetId' \ + --output text 2>/dev/null || true +} + +ec2_security_group_vpc() { + local sg_id="$1" + local region="$2" + aws ec2 describe-security-groups \ + --region "$region" \ + --group-ids "$sg_id" \ + --query 'SecurityGroups[0].VpcId' \ + --output text 2>/dev/null || true +} + +ec2_sg_allows_from_sg_on_port() { + local db_sg_id="$1" + local source_sg_id="$2" + 
local port="$3" + local region="$4" + local allowed_groups + allowed_groups="$(aws ec2 describe-security-groups \ + --region "$region" \ + --group-ids "$db_sg_id" \ + --query "SecurityGroups[0].IpPermissions[?FromPort<=\`$port\` && ToPort>=\`$port\`].UserIdGroupPairs[].GroupId" \ + --output text 2>/dev/null || true)" + [[ " $allowed_groups " == *" $source_sg_id "* ]] +} + +ec2_subnet_route_table_id() { + local subnet_id="$1" + local vpc_id="$2" + local region="$3" + local rt_id + rt_id="$(aws ec2 describe-route-tables \ + --region "$region" \ + --filters "Name=association.subnet-id,Values=$subnet_id" \ + --query 'RouteTables[0].RouteTableId' \ + --output text 2>/dev/null || true)" + if [[ -z "$rt_id" || "$rt_id" == "None" ]]; then + rt_id="$(aws ec2 describe-route-tables \ + --region "$region" \ + --filters "Name=vpc-id,Values=$vpc_id" "Name=association.main,Values=true" \ + --query 'RouteTables[0].RouteTableId' \ + --output text 2>/dev/null || true)" + fi + echo "$rt_id" +} + +ec2_subnet_default_route_target() { + local subnet_id="$1" + local vpc_id="$2" + local region="$3" + local rt_id targets target + rt_id="$(ec2_subnet_route_table_id "$subnet_id" "$vpc_id" "$region")" + if [[ -z "$rt_id" || "$rt_id" == "None" ]]; then + echo "none" + return 0 + fi + + # Read all active default-route targets and pick the first concrete one. 
+ targets="$(aws ec2 describe-route-tables \ + --region "$region" \ + --route-table-ids "$rt_id" \ + --query "RouteTables[0].Routes[?DestinationCidrBlock=='0.0.0.0/0' && State=='active'].[NatGatewayId,GatewayId,TransitGatewayId,NetworkInterfaceId,VpcPeeringConnectionId]" \ + --output text 2>/dev/null || true)" + for target in $targets; do + [[ "$target" == "None" ]] && continue + echo "$target" + return 0 + done + + echo "none" +} + +discover_private_lambda_subnets_for_db_vpc() { + local vpc_id="$1" + local region="$2" + local subnet_ids subnet_id route_target out + subnet_ids="$(ec2_vpc_subnet_ids "$vpc_id" "$region")" + if [[ -z "$subnet_ids" || "$subnet_ids" == "None" ]]; then + echo "" + return 0 + fi + + out="" + for subnet_id in $subnet_ids; do + [[ -z "$subnet_id" ]] && continue + route_target="$(ec2_subnet_default_route_target "$subnet_id" "$vpc_id" "$region")" + # Lambda private-subnet candidates: active default route through NAT. + if [[ "$route_target" == nat-* ]]; then + if [[ -z "$out" ]]; then + out="$subnet_id" + else + out="$out,$subnet_id" + fi + fi + done + echo "$out" +} + +validate_private_existing_db_connectivity() { + local region="$1" + local engine="$2" + local subnet_csv="$3" + local lambda_sg="$4" + local db_vpc="$5" + local db_sgs_csv="$6" + local db_host="$7" + local db_port subnet_list subnet_vpcs first_vpc line subnet_id subnet_vpc db_sg_id lambda_sg_vpc db_sg_list route_target rt_id ingress_ok + local -a no_nat_subnets + + db_port="3306" + [[ "$engine" == "postgresql" ]] && db_port="5432" + + IFS=',' read -r -a subnet_list <<< "$subnet_csv" + if [[ "${#subnet_list[@]}" -lt 1 ]]; then + echo "Connectivity preflight failed: no subnet IDs provided for private mode." >&2 + return 1 + fi + + subnet_vpcs="$(ec2_subnet_vpc_ids "$region" "${subnet_list[@]}")" + if [[ -z "$subnet_vpcs" || "$subnet_vpcs" == "None" ]]; then + echo "Connectivity preflight failed: could not read VPC IDs for provided subnets." 
>&2 + return 1 + fi + + first_vpc="" + while IFS=$'\t' read -r subnet_id subnet_vpc; do + [[ -z "$subnet_id" || -z "$subnet_vpc" ]] && continue + if [[ -z "$first_vpc" ]]; then + first_vpc="$subnet_vpc" + elif [[ "$subnet_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: subnets span multiple VPCs." >&2 + return 1 + fi + done <<< "$subnet_vpcs" + + if [[ -z "$first_vpc" ]]; then + echo "Connectivity preflight failed: unable to determine subnet VPC." >&2 + return 1 + fi + + if [[ -n "$db_vpc" && "$db_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: Lambda subnets are in $first_vpc but DB is in $db_vpc." >&2 + return 1 + fi + + lambda_sg_vpc="$(ec2_security_group_vpc "$lambda_sg" "$region")" + if [[ -z "$lambda_sg_vpc" || "$lambda_sg_vpc" == "None" ]]; then + echo "Connectivity preflight failed: Lambda security group '$lambda_sg' was not found." >&2 + return 1 + fi + if [[ "$lambda_sg_vpc" != "$first_vpc" ]]; then + echo "Connectivity preflight failed: Lambda security group is in $lambda_sg_vpc, expected $first_vpc." >&2 + return 1 + fi + + if [[ -n "$db_sgs_csv" ]]; then + ingress_ok="false" + IFS=',' read -r -a db_sg_list <<< "$db_sgs_csv" + for db_sg_id in "${db_sg_list[@]}"; do + db_sg_id="${db_sg_id// /}" + [[ -z "$db_sg_id" ]] && continue + if ec2_sg_allows_from_sg_on_port "$db_sg_id" "$lambda_sg" "$db_port" "$region"; then + echo "Connectivity preflight passed: DB SG $db_sg_id allows Lambda SG $lambda_sg on port $db_port." + ingress_ok="true" + break + fi + done + if [[ "$ingress_ok" != "true" ]]; then + echo "Connectivity preflight failed: none of the DB security groups allow Lambda SG $lambda_sg on port $db_port." >&2 + echo "Fix: add an inbound SG rule on the DB security group from '$lambda_sg' to TCP $db_port." >&2 + return 1 + fi + fi + + if [[ -z "$db_sgs_csv" ]]; then + echo "Connectivity preflight warning: DB SGs could not be auto-detected for host $db_host." 
>&2 + echo "Cannot verify ingress rule automatically; continuing with subnet/VPC checks only." >&2 + fi + + no_nat_subnets=() + for subnet_id in "${subnet_list[@]}"; do + subnet_id="${subnet_id// /}" + [[ -z "$subnet_id" ]] && continue + route_target="$(ec2_subnet_default_route_target "$subnet_id" "$first_vpc" "$region")" + if [[ "$route_target" != nat-* ]]; then + no_nat_subnets+=("$subnet_id:$route_target") + fi + done + + if [[ "${#no_nat_subnets[@]}" -gt 0 ]]; then + echo "Connectivity preflight failed: one or more selected private subnets do not have an active NAT default route." >&2 + for entry in "${no_nat_subnets[@]}"; do + subnet_id="${entry%%:*}" + route_target="${entry#*:}" + rt_id="$(ec2_subnet_route_table_id "$subnet_id" "$first_vpc" "$region")" + echo " - Subnet $subnet_id (route table $rt_id) default route target: $route_target" >&2 + done + echo "Fix before deploy:" >&2 + echo " 1) Use private subnets whose route table has 0.0.0.0/0 -> nat-xxxx" >&2 + echo " 2) Or update those route tables to point 0.0.0.0/0 to a NAT gateway" >&2 + echo " 3) Ensure DB SG allows Lambda SG '$lambda_sg' on TCP $db_port" >&2 + return 1 + fi + + echo "Connectivity preflight passed: private subnets have NAT egress." 
+ return 0 +} + +stack_status() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --region "$region" \ + --query 'Stacks[0].StackStatus' \ + --output text 2>/dev/null || true +} + +stack_parameters() { + local stack_name="$1" + local region="$2" + aws cloudformation describe-stacks \ + --stack-name "$stack_name" \ + --region "$region" \ + --query 'Stacks[0].Parameters[*].[ParameterKey,ParameterValue]' \ + --output text 2>/dev/null || true +} + +stack_param_value() { + local params="$1" + local key="$2" + echo "$params" | awk -F'\t' -v k="$key" '$1==k {print $2}' +} + +# Keep bootstrap stack aligned with the checked-in template so IAM/policy fixes +# (for example CloudFormation changeset permissions) apply before app deploy. +# Set SYNCBOT_SKIP_BOOTSTRAP_SYNC=1 to opt out. +sync_bootstrap_stack_from_repo() { + local bootstrap_stack="$1" + local aws_region="$2" + local params github_repo create_oidc bucket_prefix + + if [[ "${SYNCBOT_SKIP_BOOTSTRAP_SYNC:-}" == "1" ]]; then + echo "Skipping bootstrap template sync (SYNCBOT_SKIP_BOOTSTRAP_SYNC=1)." + return 0 + fi + + params="$(stack_parameters "$bootstrap_stack" "$aws_region")" + if [[ -z "$params" ]]; then + echo "Could not read bootstrap stack parameters for '$bootstrap_stack' in $aws_region; skipping bootstrap template sync." >&2 + return 0 + fi + + github_repo="$(stack_param_value "$params" "GitHubRepository")" + github_repo="${github_repo//$'\r'/}" + github_repo="${github_repo#"${github_repo%%[![:space:]]*}"}" + github_repo="${github_repo%"${github_repo##*[![:space:]]}"}" + if [[ -z "$github_repo" ]]; then + echo "Bootstrap stack has no GitHubRepository parameter; skipping bootstrap template sync." 
>&2 + return 0 + fi + + create_oidc="$(stack_param_value "$params" "CreateOIDCProvider")" + bucket_prefix="$(stack_param_value "$params" "DeploymentBucketPrefix")" + [[ -z "$create_oidc" ]] && create_oidc="true" + [[ -z "$bucket_prefix" ]] && bucket_prefix="syncbot-deploy" + + echo + echo "Syncing bootstrap stack with repo template..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$bootstrap_stack" \ + --parameter-overrides \ + "GitHubRepository=$github_repo" \ + "CreateOIDCProvider=$create_oidc" \ + "DeploymentBucketPrefix=$bucket_prefix" \ + --capabilities CAPABILITY_NAMED_IAM \ + --no-fail-on-empty-changeset \ + --region "$aws_region" +} + +# Compare GitHub owner/repo from bootstrap stack to the repo chosen for gh; offer to update OIDC trust. +maybe_prompt_bootstrap_github_trust_update() { + local picked_repo="$1" + local bootstrap_stack="$2" + local aws_region="$3" + local params trusted picked_lc trusted_lc create_oidc bucket_prefix + + if [[ -z "$bootstrap_stack" || -z "$picked_repo" ]]; then + return 0 + fi + + params="$(stack_parameters "$bootstrap_stack" "$aws_region")" + if [[ -z "$params" ]]; then + echo "Could not read bootstrap stack parameters for '$bootstrap_stack' in $aws_region; skipping OIDC trust drift check." >&2 + return 0 + fi + + trusted="$(stack_param_value "$params" "GitHubRepository")" + # CloudFormation / CLI sometimes surface trailing whitespace; normalize for compare + display. + trusted="${trusted//$'\r'/}" + trusted="${trusted#"${trusted%%[![:space:]]*}"}" + trusted="${trusted%"${trusted##*[![:space:]]}"}" + if [[ -z "$trusted" ]]; then + echo "Bootstrap stack has no GitHubRepository parameter; skipping OIDC trust drift check." 
>&2 + return 0 + fi + + picked_lc="$(printf '%s' "$picked_repo" | tr '[:upper:]' '[:lower:]')" + trusted_lc="$(printf '%s' "$trusted" | tr '[:upper:]' '[:lower:]')" + if [[ "$picked_lc" == "$trusted_lc" ]]; then + echo "Bootstrap OIDC: stack '$bootstrap_stack' has GitHubRepository=$trusted — matches your choice; no bootstrap update needed." + return 0 + fi + + echo + echo "Warning: Bootstrap stack '$bootstrap_stack' OIDC trust is scoped to:" + echo " GitHubRepository=$trusted" + echo "You chose this repository for GitHub Actions variables:" + echo " $picked_repo" + echo "GitHub Actions in '$picked_repo' cannot assume the deploy role until trust matches." + echo + if ! prompt_yes_no "Update bootstrap OIDC trust to '$picked_repo'? (CloudFormation stack update)" "n"; then + echo "Leaving bootstrap GitHubRepository unchanged. Fix manually or update the bootstrap stack later." >&2 + return 0 + fi + + create_oidc="$(stack_param_value "$params" "CreateOIDCProvider")" + bucket_prefix="$(stack_param_value "$params" "DeploymentBucketPrefix")" + [[ -z "$create_oidc" ]] && create_oidc="true" + [[ -z "$bucket_prefix" ]] && bucket_prefix="syncbot-deploy" + + echo "Updating bootstrap stack '$bootstrap_stack'..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$bootstrap_stack" \ + --parameter-overrides \ + "GitHubRepository=$picked_repo" \ + "CreateOIDCProvider=$create_oidc" \ + "DeploymentBucketPrefix=$bucket_prefix" \ + --capabilities CAPABILITY_NAMED_IAM \ + --region "$aws_region" + echo "Bootstrap OIDC trust updated to $picked_repo." 
+} + +print_recent_stack_failures() { + local stack_name="$1" + local region="$2" + echo "Recent failure events for $stack_name:" + aws cloudformation describe-stack-events \ + --stack-name "$stack_name" \ + --region "$region" \ + --query "StackEvents[?contains(ResourceStatus, 'FAILED')].[Timestamp,LogicalResourceId,ResourceStatus,ResourceStatusReason]" \ + --output table 2>/dev/null || true +} + +handle_unhealthy_stack_state() { + local stack_name="$1" + local region="$2" + local status + status="$(stack_status "$stack_name" "$region")" + if [[ -z "$status" || "$status" == "None" ]]; then + return 0 + fi + + case "$status" in + CREATE_FAILED|ROLLBACK_COMPLETE|ROLLBACK_FAILED|UPDATE_ROLLBACK_FAILED|DELETE_FAILED) + echo + echo "Stack $stack_name is in a failed state: $status" + print_recent_stack_failures "$stack_name" "$region" + echo + if prompt_yes_no "Delete failed stack '$stack_name' now so deploy can continue?" "y"; then + aws cloudformation delete-stack --stack-name "$stack_name" --region "$region" + echo "Waiting for stack deletion to complete..." + aws cloudformation wait stack-delete-complete --stack-name "$stack_name" --region "$region" + else + echo "Cannot continue deploy while stack is in $status." + exit 1 + fi + ;; + *_IN_PROGRESS) + echo "Error: stack $stack_name is currently $status. Wait for it to finish, then rerun." >&2 + exit 1 + ;; + *) + ;; + esac +} + +echo "=== Prerequisites ===" +prereqs_require_cmd aws prereqs_hint_aws_cli +prereqs_require_cmd sam prereqs_hint_sam_cli +prereqs_require_cmd docker prereqs_hint_docker +prereqs_require_cmd python3 prereqs_hint_python3 +prereqs_require_cmd curl prereqs_hint_curl + +prereqs_print_cli_status_matrix "AWS" aws sam docker python3 curl + +if [[ ! -f "$APP_TEMPLATE" ]]; then + echo "Error: app template not found at $APP_TEMPLATE" >&2 + exit 1 +fi +if [[ ! 
-f "$BOOTSTRAP_TEMPLATE" ]]; then + echo "Error: bootstrap template not found at $BOOTSTRAP_TEMPLATE" >&2 + exit 1 +fi + +echo "=== SyncBot AWS Deploy ===" +echo + +DEFAULT_REGION="${AWS_REGION:-us-east-2}" +REGION="$(prompt_default "AWS region" "$DEFAULT_REGION")" +echo +echo "=== Authentication ===" +ensure_aws_authenticated +BOOTSTRAP_STACK="$(prompt_default "Bootstrap stack name" "syncbot-bootstrap")" + +# Probe bootstrap outputs only; create/sync runs later if task 1 (Bootstrap) is selected. +BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + +SUGGESTED_TEST_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedTestStackName")" +SUGGESTED_PROD_STACK="$(output_value "$BOOTSTRAP_OUTPUTS" "SuggestedProdStackName")" +[[ -z "$SUGGESTED_TEST_STACK" ]] && SUGGESTED_TEST_STACK="syncbot-test" +[[ -z "$SUGGESTED_PROD_STACK" ]] && SUGGESTED_PROD_STACK="syncbot-prod" + +echo +echo "=== Stack Identity ===" +STAGE="$(prompt_default "Deploy stage (test/prod)" "test")" +if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then + echo "Error: stage must be 'test' or 'prod'." 
>&2 + exit 1 +fi + +DEFAULT_STACK="$SUGGESTED_TEST_STACK" +[[ "$STAGE" == "prod" ]] && DEFAULT_STACK="$SUGGESTED_PROD_STACK" +STACK_NAME="$(prompt_default "App stack name" "$DEFAULT_STACK")" +EXISTING_STACK_STATUS="$(stack_status "$STACK_NAME" "$REGION")" +IS_STACK_UPDATE="false" +EXISTING_STACK_PARAMS="" +PREV_EXISTING_DATABASE_HOST="" +PREV_EXISTING_DATABASE_ADMIN_USER="" +PREV_EXISTING_DATABASE_NETWORK_MODE="" +PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="" +PREV_EXISTING_DATABASE_LAMBDA_SG_ID="" +PREV_DATABASE_ENGINE="" +PREV_DATABASE_SCHEMA="" +PREV_LOG_LEVEL="" +PREV_REQUIRE_ADMIN="" +PREV_SOFT_DELETE="" +PREV_FEDERATION="" +PREV_INSTANCE_ID="" +PREV_PUBLIC_URL="" +PREV_PRIMARY_WORKSPACE="" +PREV_ENABLE_DB_RESET="" +PREV_DB_TLS="" +PREV_DB_SSL_CA="" +PREV_DATABASE_HOST_IN_USE="" +PREV_STACK_USES_EXISTING_DB="false" +EXISTING_STACK_OUTPUTS="" +if [[ -n "$EXISTING_STACK_STATUS" && "$EXISTING_STACK_STATUS" != "None" ]]; then + echo "Detected existing CloudFormation stack: $STACK_NAME ($EXISTING_STACK_STATUS)" + if ! prompt_yes_no "Continue and update this existing stack?" "y"; then + echo "Aborted." 
+ exit 0 + fi + IS_STACK_UPDATE="true" + EXISTING_STACK_PARAMS="$(stack_parameters "$STACK_NAME" "$REGION")" + PREV_EXISTING_DATABASE_HOST="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseHost")" + PREV_EXISTING_DATABASE_ADMIN_USER="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseAdminUser")" + PREV_EXISTING_DATABASE_NETWORK_MODE="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseNetworkMode")" + PREV_EXISTING_DATABASE_SUBNET_IDS_CSV="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseSubnetIdsCsv")" + PREV_EXISTING_DATABASE_LAMBDA_SG_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "ExistingDatabaseLambdaSecurityGroupId")" + PREV_DATABASE_ENGINE="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseEngine")" + PREV_DATABASE_SCHEMA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSchema")" + PREV_LOG_LEVEL="$(stack_param_value "$EXISTING_STACK_PARAMS" "LogLevel")" + PREV_REQUIRE_ADMIN="$(stack_param_value "$EXISTING_STACK_PARAMS" "RequireAdmin")" + PREV_SOFT_DELETE="$(stack_param_value "$EXISTING_STACK_PARAMS" "SoftDeleteRetentionDays")" + PREV_FEDERATION="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotFederationEnabled")" + PREV_INSTANCE_ID="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotInstanceId")" + PREV_PUBLIC_URL="$(stack_param_value "$EXISTING_STACK_PARAMS" "SyncbotPublicUrl")" + PREV_PRIMARY_WORKSPACE="$(stack_param_value "$EXISTING_STACK_PARAMS" "PrimaryWorkspace")" + PREV_ENABLE_DB_RESET="$(stack_param_value "$EXISTING_STACK_PARAMS" "EnableDbReset")" + PREV_DB_TLS="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseTlsEnabled")" + PREV_DB_SSL_CA="$(stack_param_value "$EXISTING_STACK_PARAMS" "DatabaseSslCaPath")" + EXISTING_STACK_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" + PREV_DATABASE_HOST_IN_USE="$(output_value "$EXISTING_STACK_OUTPUTS" "DatabaseHostInUse")" + if [[ -n "$PREV_EXISTING_DATABASE_HOST" ]]; then + PREV_STACK_USES_EXISTING_DB="true" + fi + if [[ -z 
"$PREV_EXISTING_DATABASE_HOST" && -n "$PREV_DATABASE_HOST_IN_USE" ]]; then + PREV_EXISTING_DATABASE_HOST="$PREV_DATABASE_HOST_IN_USE" + fi +fi + +echo +prompt_deploy_tasks_aws + +if [[ "$TASK_BOOTSTRAP" == "true" ]]; then + echo + echo "=== Bootstrap Stack ===" + if [[ -z "$BOOTSTRAP_OUTPUTS" ]]; then + echo "Bootstrap stack not found (or has no outputs): $BOOTSTRAP_STACK in $REGION" + if prompt_yes_no "Deploy bootstrap stack now?" "y"; then + GITHUB_REPO="$(prompt_default "GitHub repository (owner/repo)" "REPLACE_ME_OWNER/REPLACE_ME_REPO")" + CREATE_OIDC="$(prompt_default "Create OIDC provider (true/false)" "true")" + BUCKET_PREFIX="$(prompt_default "Deployment bucket prefix" "syncbot-deploy")" + echo "Deploying bootstrap stack..." + aws cloudformation deploy \ + --template-file "$BOOTSTRAP_TEMPLATE" \ + --stack-name "$BOOTSTRAP_STACK" \ + --parameter-overrides \ + "GitHubRepository=$GITHUB_REPO" \ + "CreateOIDCProvider=$CREATE_OIDC" \ + "DeploymentBucketPrefix=$BUCKET_PREFIX" \ + --capabilities CAPABILITY_NAMED_IAM \ + --region "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + else + echo "Skipping bootstrap. You must provide deploy bucket manually when deploying." 
+ fi + fi + if [[ -n "$BOOTSTRAP_OUTPUTS" ]]; then + sync_bootstrap_stack_from_repo "$BOOTSTRAP_STACK" "$REGION" + BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" + fi +fi + +BOOTSTRAP_OUTPUTS="$(bootstrap_describe_outputs "$BOOTSTRAP_STACK" "$REGION")" +S3_BUCKET="$(output_value "$BOOTSTRAP_OUTPUTS" "DeploymentBucketName")" +if [[ -n "$S3_BUCKET" ]]; then + echo "Detected deploy bucket from bootstrap: $S3_BUCKET" +elif [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + S3_BUCKET="$(prompt_default "Deployment S3 bucket name" "REPLACE_ME_DEPLOY_BUCKET")" +else + S3_BUCKET="" +fi + +if [[ "$TASK_BUILD_DEPLOY" != "true" ]]; then + if [[ "$TASK_CICD" == "true" || "$TASK_SLACK_API" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + if [[ -z "${EXISTING_STACK_STATUS:-}" || "$EXISTING_STACK_STATUS" == "None" ]]; then + echo "Error: CloudFormation stack '$STACK_NAME' does not exist in $REGION. Select task 2 (Build/Deploy) first or create the stack." >&2 + exit 1 + fi + fi +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then +echo +echo "=== Configuration ===" +echo "=== Database Source ===" +# DB_MODE / GH_DB_MODE: 1 = stack-managed RDS in this template; 2 = external or existing RDS host. 
+DB_MODE_DEFAULT="1" +if [[ "$IS_STACK_UPDATE" == "true" ]]; then + if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then + EXISTING_DB_LABEL="$PREV_EXISTING_DATABASE_HOST" + [[ -z "$EXISTING_DB_LABEL" ]] && EXISTING_DB_LABEL="not set" + DB_MODE_DEFAULT="2" + echo " 1) Use stack-managed RDS" + echo " 2) Use external or existing RDS host: $EXISTING_DB_LABEL (default/current)" + else + DB_MODE_DEFAULT="1" + echo " 1) Use stack-managed RDS (default/current)" + echo " 2) Use external or existing RDS host" + fi +else + echo " 1) Use stack-managed RDS (default)" + echo " 2) Use external or existing RDS host" +fi +DB_MODE="$(prompt_default "Choose database source (1 or 2)" "$DB_MODE_DEFAULT")" +if [[ "$DB_MODE" != "1" && "$DB_MODE" != "2" ]]; then + echo "Error: invalid database mode." >&2 + exit 1 +fi +if [[ "$IS_STACK_UPDATE" == "true" && "$PREV_STACK_USES_EXISTING_DB" != "true" && "$DB_MODE" == "2" ]]; then + echo + echo "Warning: switching from stack-managed RDS to existing external DB will remove stack-managed RDS/VPC resources." + if ! prompt_yes_no "Continue with this destructive migration?" "n"; then + echo "Keeping stack-managed RDS mode for this deploy." + DB_MODE="1" + fi +fi + +DATABASE_ENGINE="mysql" +DB_ENGINE_DEFAULT="1" +if [[ "$IS_STACK_UPDATE" == "true" && "$PREV_DATABASE_ENGINE" == "postgresql" ]]; then + DATABASE_ENGINE="postgresql" + DB_ENGINE_DEFAULT="2" +fi +echo +echo "=== Database Engine ===" +if [[ "$DB_ENGINE_DEFAULT" == "2" ]]; then + echo " 1) MySQL" + echo " 2) PostgreSQL (default/current)" +else + echo " 1) MySQL (default/current)" + echo " 2) PostgreSQL" +fi +DB_ENGINE_MODE="$(prompt_default "Choose 1 or 2" "$DB_ENGINE_DEFAULT")" +if [[ "$DB_ENGINE_MODE" == "2" ]]; then + DATABASE_ENGINE="postgresql" +elif [[ "$DB_ENGINE_MODE" != "1" ]]; then + echo "Error: invalid database engine mode." 
>&2 + exit 1 +fi + +echo +echo "=== Slack App Credentials ===" +SLACK_SIGNING_SECRET_SOURCE="prompt" +[[ -n "${SLACK_SIGNING_SECRET:-}" ]] && SLACK_SIGNING_SECRET_SOURCE="env:SLACK_SIGNING_SECRET" +SLACK_CLIENT_SECRET_SOURCE="prompt" +[[ -n "${SLACK_CLIENT_SECRET:-}" ]] && SLACK_CLIENT_SECRET_SOURCE="env:SLACK_CLIENT_SECRET" +SLACK_SIGNING_SECRET="$(required_from_env_or_prompt "SLACK_SIGNING_SECRET" "SlackSigningSecret" "secret")" +SLACK_CLIENT_SECRET="$(required_from_env_or_prompt "SLACK_CLIENT_SECRET" "SlackClientSecret" "secret")" +SLACK_CLIENT_ID="$(required_from_env_or_prompt "SLACK_CLIENT_ID" "SlackClientID")" + +ENV_EXISTING_DATABASE_HOST="${EXISTING_DATABASE_HOST:-}" +ENV_EXISTING_DATABASE_ADMIN_USER="${EXISTING_DATABASE_ADMIN_USER:-}" +ENV_EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" +EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" +EXISTING_DATABASE_HOST="" +EXISTING_DATABASE_ADMIN_USER="" +EXISTING_DATABASE_ADMIN_PASSWORD="" +EXISTING_DATABASE_NETWORK_MODE="public" +EXISTING_DATABASE_SUBNET_IDS_CSV="" +EXISTING_DATABASE_LAMBDA_SG_ID="" +DATABASE_SCHEMA="" +DATABASE_SCHEMA_DEFAULT="syncbot_${STAGE}" +if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_DATABASE_SCHEMA" ]]; then + DATABASE_SCHEMA_DEFAULT="$PREV_DATABASE_SCHEMA" +fi + +if [[ "$DB_MODE" == "2" ]]; then + echo + echo "=== Existing Database Host ===" + EXISTING_DATABASE_HOST_DEFAULT="REPLACE_ME_RDS_HOST" + [[ -n "$PREV_EXISTING_DATABASE_HOST" ]] && EXISTING_DATABASE_HOST_DEFAULT="$PREV_EXISTING_DATABASE_HOST" + EXISTING_DATABASE_ADMIN_USER_DEFAULT="admin" + [[ -n "$PREV_EXISTING_DATABASE_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$PREV_EXISTING_DATABASE_ADMIN_USER" + + if [[ -n "$ENV_EXISTING_DATABASE_HOST" ]]; then + echo "Using ExistingDatabaseHost from environment variable EXISTING_DATABASE_HOST." 
+ EXISTING_DATABASE_HOST="$ENV_EXISTING_DATABASE_HOST" + else + EXISTING_DATABASE_HOST="$(prompt_default "ExistingDatabaseHost (RDS endpoint hostname)" "$EXISTING_DATABASE_HOST_DEFAULT")" + fi + + DETECTED_ADMIN_USER="" + DETECTED_ADMIN_SECRET_ARN="" + if [[ "$IS_STACK_UPDATE" == "true" ]]; then + RDS_ADMIN_LOOKUP="$(rds_lookup_admin_defaults "$EXISTING_DATABASE_HOST" "$REGION")" + if [[ -n "$RDS_ADMIN_LOOKUP" && "$RDS_ADMIN_LOOKUP" != "None" ]]; then + IFS=$'\t' read -r DETECTED_ADMIN_USER DETECTED_ADMIN_SECRET_ARN <<< "$RDS_ADMIN_LOOKUP" + [[ "$DETECTED_ADMIN_USER" == "None" ]] && DETECTED_ADMIN_USER="" + [[ "$DETECTED_ADMIN_SECRET_ARN" == "None" ]] && DETECTED_ADMIN_SECRET_ARN="" + fi + fi + + if [[ -z "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" || "$EXISTING_DATABASE_ADMIN_USER_DEFAULT" == "admin" ]]; then + [[ -n "$DETECTED_ADMIN_USER" ]] && EXISTING_DATABASE_ADMIN_USER_DEFAULT="$DETECTED_ADMIN_USER" + fi + if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_USER" ]]; then + echo "Using ExistingDatabaseAdminUser from environment variable EXISTING_DATABASE_ADMIN_USER." + EXISTING_DATABASE_ADMIN_USER="$ENV_EXISTING_DATABASE_ADMIN_USER" + else + EXISTING_DATABASE_ADMIN_USER="$(prompt_default "ExistingDatabaseAdminUser" "$EXISTING_DATABASE_ADMIN_USER_DEFAULT")" + fi + + if [[ -n "$ENV_EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Using ExistingDatabaseAdminPassword from environment variable EXISTING_DATABASE_ADMIN_PASSWORD." + EXISTING_DATABASE_ADMIN_PASSWORD="$ENV_EXISTING_DATABASE_ADMIN_PASSWORD" + EXISTING_DB_ADMIN_PASSWORD_SOURCE="env:EXISTING_DATABASE_ADMIN_PASSWORD" + else + if [[ "$IS_STACK_UPDATE" == "true" && -n "$DETECTED_ADMIN_SECRET_ARN" ]]; then + EXISTING_DATABASE_ADMIN_PASSWORD="$(secret_password_by_id "$DETECTED_ADMIN_SECRET_ARN" "$REGION" 2>/dev/null || true)" + if [[ -n "$EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Detected existing DB admin password from AWS Secrets Manager for re-deploy." 
+ EXISTING_DB_ADMIN_PASSWORD_SOURCE="aws-secret:$DETECTED_ADMIN_SECRET_ARN" + fi + fi + if [[ -z "$EXISTING_DATABASE_ADMIN_PASSWORD" ]]; then + echo "Existing DB admin credentials couldn't be auto-detected. Please enter them manually." + EXISTING_DATABASE_ADMIN_PASSWORD="$(prompt_secret_required "ExistingDatabaseAdminPassword")" + EXISTING_DB_ADMIN_PASSWORD_SOURCE="prompt" + fi + fi + + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" + + if [[ -z "$EXISTING_DATABASE_HOST" || "$EXISTING_DATABASE_HOST" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseHost is required for existing DB mode." >&2 + exit 1 + fi + + RDS_LOOKUP="$(rds_lookup_network_defaults "$EXISTING_DATABASE_HOST" "$REGION")" + DETECTED_PUBLIC="" + DETECTED_SUBNETS="" + DETECTED_SGS="" + DETECTED_VPC="" + DETECTED_DB_ID="" + if [[ -n "$RDS_LOOKUP" && "$RDS_LOOKUP" != "None" ]]; then + IFS=$'\t' read -r DETECTED_PUBLIC DETECTED_SUBNETS DETECTED_SGS DETECTED_VPC DETECTED_DB_ID <<< "$RDS_LOOKUP" + [[ "$DETECTED_PUBLIC" == "None" ]] && DETECTED_PUBLIC="" + [[ "$DETECTED_SUBNETS" == "None" ]] && DETECTED_SUBNETS="" + [[ "$DETECTED_SGS" == "None" ]] && DETECTED_SGS="" + [[ "$DETECTED_VPC" == "None" ]] && DETECTED_VPC="" + [[ "$DETECTED_DB_ID" == "None" ]] && DETECTED_DB_ID="" + echo + echo "Detected RDS instance details:" + [[ -n "$DETECTED_DB_ID" ]] && echo " DB instance: $DETECTED_DB_ID" + [[ -n "$DETECTED_VPC" ]] && echo " VPC: $DETECTED_VPC" + [[ -n "$DETECTED_PUBLIC" ]] && echo " Public access: $DETECTED_PUBLIC" + else + echo + echo "Could not auto-detect existing RDS network settings from host." + echo "You can still continue by entering network values manually." 
+ fi + + DEFAULT_EXISTING_DB_NETWORK_MODE="public" + if [[ -n "$PREV_EXISTING_DATABASE_NETWORK_MODE" ]]; then + DEFAULT_EXISTING_DB_NETWORK_MODE="$PREV_EXISTING_DATABASE_NETWORK_MODE" + fi + if [[ "$DETECTED_PUBLIC" == "False" ]]; then + DEFAULT_EXISTING_DB_NETWORK_MODE="private" + fi + EXISTING_DATABASE_NETWORK_MODE="$(prompt_default "Existing DB network mode (public/private)" "$DEFAULT_EXISTING_DB_NETWORK_MODE")" + if [[ "$EXISTING_DATABASE_NETWORK_MODE" != "public" && "$EXISTING_DATABASE_NETWORK_MODE" != "private" ]]; then + echo "Error: existing DB network mode must be 'public' or 'private'." >&2 + exit 1 + fi + + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + AUTO_PRIVATE_SUBNETS="" + if [[ -n "$DETECTED_VPC" ]]; then + AUTO_PRIVATE_SUBNETS="$(discover_private_lambda_subnets_for_db_vpc "$DETECTED_VPC" "$REGION")" + if [[ -n "$AUTO_PRIVATE_SUBNETS" ]]; then + echo "Detected private Lambda subnet candidates (NAT-routed): $AUTO_PRIVATE_SUBNETS" + fi + fi + + DEFAULT_SUBNETS="$AUTO_PRIVATE_SUBNETS" + [[ -z "$DEFAULT_SUBNETS" && -n "$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" ]] && DEFAULT_SUBNETS="$PREV_EXISTING_DATABASE_SUBNET_IDS_CSV" + [[ -z "$DEFAULT_SUBNETS" ]] && DEFAULT_SUBNETS="$DETECTED_SUBNETS" + [[ -z "$DEFAULT_SUBNETS" ]] && DEFAULT_SUBNETS="REPLACE_ME_SUBNET_1,REPLACE_ME_SUBNET_2" + DEFAULT_SG="${DETECTED_SGS%%,*}" + [[ -n "$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" ]] && DEFAULT_SG="$PREV_EXISTING_DATABASE_LAMBDA_SG_ID" + [[ -z "$DEFAULT_SG" ]] && DEFAULT_SG="REPLACE_ME_LAMBDA_SG_ID" + + echo + echo "Private DB mode selected: Lambdas will run in VPC." + echo "Note: app Lambda needs Internet egress (usually NAT) to call Slack APIs." 
+ EXISTING_DATABASE_SUBNET_IDS_CSV="$(prompt_default "ExistingDatabaseSubnetIdsCsv (comma-separated)" "$DEFAULT_SUBNETS")" + EXISTING_DATABASE_LAMBDA_SG_ID="$(prompt_default "ExistingDatabaseLambdaSecurityGroupId" "$DEFAULT_SG")" + + if [[ -z "$EXISTING_DATABASE_SUBNET_IDS_CSV" || "$EXISTING_DATABASE_SUBNET_IDS_CSV" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseSubnetIdsCsv is required for private mode." >&2 + exit 1 + fi + if [[ -z "$EXISTING_DATABASE_LAMBDA_SG_ID" || "$EXISTING_DATABASE_LAMBDA_SG_ID" == REPLACE_ME* ]]; then + echo "Error: valid ExistingDatabaseLambdaSecurityGroupId is required for private mode." >&2 + exit 1 + fi + + echo + echo "Running private-connectivity preflight checks..." + if ! validate_private_existing_db_connectivity \ + "$REGION" \ + "$DATABASE_ENGINE" \ + "$EXISTING_DATABASE_SUBNET_IDS_CSV" \ + "$EXISTING_DATABASE_LAMBDA_SG_ID" \ + "$DETECTED_VPC" \ + "$DETECTED_SGS" \ + "$EXISTING_DATABASE_HOST"; then + echo "Fix network settings and rerun deploy." >&2 + exit 1 + fi + fi +else + echo + echo "=== New RDS Database ===" + echo "New RDS mode uses:" + echo " - admin user: syncbot_admin_${STAGE} (password auto-generated)" + echo " - app user: syncbot_user_${STAGE} (password auto-generated)" + DATABASE_SCHEMA="$(prompt_default "DatabaseSchema" "$DATABASE_SCHEMA_DEFAULT")" +fi + +TOKEN_OVERRIDE="$(prompt_default "TokenEncryptionKeyOverride (optional for disaster recovery; leave blank for normal deploy)" "")" +EXISTING_TOKEN_SECRET_ARN="" +TOKEN_SECRET_NAME="syncbot-${STAGE}-token-encryption-key" +APP_DB_SECRET_NAME="syncbot-${STAGE}-app-db-password" +if [[ -z "$TOKEN_OVERRIDE" ]]; then + DETECTED_TOKEN_SECRET_ARN="$(secret_arn_by_name "$TOKEN_SECRET_NAME" "$REGION")" + if [[ -n "$DETECTED_TOKEN_SECRET_ARN" && "$DETECTED_TOKEN_SECRET_ARN" != "None" ]]; then + echo "Detected existing token secret: $TOKEN_SECRET_NAME" + if prompt_yes_no "Reuse detected secret ARN for this deploy?" 
"y"; then + EXISTING_TOKEN_SECRET_ARN="$DETECTED_TOKEN_SECRET_ARN" + fi + fi +fi + +LOG_LEVEL_DEFAULT="INFO" +if [[ "$IS_STACK_UPDATE" == "true" && -n "$PREV_LOG_LEVEL" ]]; then + LOG_LEVEL_DEFAULT="$PREV_LOG_LEVEL" +fi + +REQUIRE_ADMIN="${PREV_REQUIRE_ADMIN:-true}" +SOFT_DELETE_RETENTION_DAYS="${PREV_SOFT_DELETE:-30}" +SYNCBOT_FEDERATION_ENABLED="${PREV_FEDERATION:-false}" +SYNCBOT_INSTANCE_ID="${PREV_INSTANCE_ID:-}" +SYNCBOT_PUBLIC_URL="${PREV_PUBLIC_URL:-}" +PRIMARY_WORKSPACE="${PREV_PRIMARY_WORKSPACE:-}" +ENABLE_DB_RESET="${PREV_ENABLE_DB_RESET:-}" +DATABASE_TLS_ENABLED="${PREV_DB_TLS:-}" +DATABASE_SSL_CA_PATH="${PREV_DB_SSL_CA:-}" + +echo +echo "=== Log Level ===" +LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" + +echo +echo "=== App Settings ===" +REQUIRE_ADMIN="$(prompt_require_admin "$REQUIRE_ADMIN")" +SOFT_DELETE_RETENTION_DAYS="$(prompt_soft_delete_retention_days "$SOFT_DELETE_RETENTION_DAYS")" +PRIMARY_WORKSPACE="$(prompt_primary_workspace "$PRIMARY_WORKSPACE")" +SYNCBOT_FEDERATION_ENABLED="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_ENABLED")" +if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then + SYNCBOT_INSTANCE_ID="$(prompt_instance_id "$SYNCBOT_INSTANCE_ID")" + SYNCBOT_PUBLIC_URL="$(prompt_public_url "$SYNCBOT_PUBLIC_URL")" +fi + +echo +echo "=== Deploy Summary ===" +echo "Region: $REGION" +echo "Stack: $STACK_NAME" +echo "Stage: $STAGE" +echo "Log level: $LOG_LEVEL" +echo "Require admin: $REQUIRE_ADMIN" +echo "Soft-delete days: $SOFT_DELETE_RETENTION_DAYS" +if [[ -n "$PRIMARY_WORKSPACE" ]]; then + echo "Primary workspace: $PRIMARY_WORKSPACE" +else + echo "Primary workspace: (not set — backup/restore hidden)" +fi +if [[ "$ENABLE_DB_RESET" == "true" ]]; then + echo "DB reset: enabled (PRIMARY_WORKSPACE must match)" +else + echo "DB reset: (disabled)" +fi +if [[ "$SYNCBOT_FEDERATION_ENABLED" == "true" ]]; then + echo "Federation: enabled" + [[ -n "$SYNCBOT_INSTANCE_ID" ]] && echo "Instance ID: $SYNCBOT_INSTANCE_ID" + [[ -n 
"$SYNCBOT_PUBLIC_URL" ]] && echo "Public URL: $SYNCBOT_PUBLIC_URL" +fi +echo "Deploy bucket: $S3_BUCKET" +if [[ "$DB_MODE" == "2" ]]; then + echo "DB mode: existing host" + echo "DB engine: $DATABASE_ENGINE" + echo "DB host: $EXISTING_DATABASE_HOST" + echo "DB network: $EXISTING_DATABASE_NETWORK_MODE" + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + echo "DB subnets: $EXISTING_DATABASE_SUBNET_IDS_CSV" + echo "Lambda SG: $EXISTING_DATABASE_LAMBDA_SG_ID" + fi + echo "DB schema: $DATABASE_SCHEMA" +else + echo "DB mode: create new RDS" + echo "DB engine: $DATABASE_ENGINE" + echo "DB admin user: syncbot_admin_${STAGE} (auto password)" + echo "DB app user: syncbot_user_${STAGE} (auto password)" + echo "DB schema: $DATABASE_SCHEMA" +fi +if [[ -n "$TOKEN_OVERRIDE" ]]; then + echo "DR key override: YES (TokenEncryptionKeyOverride)" +else + echo "DR key override: NO (auto-generated TOKEN_ENCRYPTION_KEY)" + if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then + echo "Token secret: Reusing existing secret ARN" + fi +fi +if [[ -n "$APP_DB_PASSWORD_OVERRIDE" ]]; then + echo "App DB secret: Reusing prior app DB password value" +fi +echo + +if ! prompt_yes_no "Proceed with build + deploy?" "y"; then + echo "Aborted." + exit 0 +fi + +echo +echo "=== Preflight ===" +preflight_secrets_manager_access "$REGION" "$TOKEN_SECRET_NAME" "$APP_DB_SECRET_NAME" "$EXISTING_TOKEN_SECRET_ARN" + +handle_orphan_app_db_secret_on_create "$EXISTING_STACK_STATUS" "$APP_DB_SECRET_NAME" "$REGION" + +handle_unhealthy_stack_state "$STACK_NAME" "$REGION" + +echo +echo "=== SAM Build ===" +echo "Building app..." 
+sam build -t "$APP_TEMPLATE" --use-container + +PARAMS=( + "Stage=$STAGE" + "DatabaseEngine=$DATABASE_ENGINE" + "SlackSigningSecret=$SLACK_SIGNING_SECRET" + "SlackClientSecret=$SLACK_CLIENT_SECRET" + "DatabaseSchema=$DATABASE_SCHEMA" + "LogLevel=$LOG_LEVEL" + "RequireAdmin=$REQUIRE_ADMIN" + "SoftDeleteRetentionDays=$SOFT_DELETE_RETENTION_DAYS" + "SyncbotFederationEnabled=$SYNCBOT_FEDERATION_ENABLED" +) +# SAM rejects Key= (empty value) in shorthand format; only include when non-empty. +[[ -n "$SYNCBOT_INSTANCE_ID" ]] && PARAMS+=("SyncbotInstanceId=$SYNCBOT_INSTANCE_ID") +[[ -n "$SYNCBOT_PUBLIC_URL" ]] && PARAMS+=("SyncbotPublicUrl=$SYNCBOT_PUBLIC_URL") +[[ -n "$PRIMARY_WORKSPACE" ]] && PARAMS+=("PrimaryWorkspace=$PRIMARY_WORKSPACE") +[[ -n "$ENABLE_DB_RESET" ]] && PARAMS+=("EnableDbReset=$ENABLE_DB_RESET") +[[ -n "$DATABASE_TLS_ENABLED" ]] && PARAMS+=("DatabaseTlsEnabled=$DATABASE_TLS_ENABLED") +[[ -n "$DATABASE_SSL_CA_PATH" ]] && PARAMS+=("DatabaseSslCaPath=$DATABASE_SSL_CA_PATH") + +if [[ -n "$SLACK_CLIENT_ID" ]]; then + PARAMS+=("SlackClientID=$SLACK_CLIENT_ID") +fi + +if [[ "$DB_MODE" == "2" ]]; then + PARAMS+=( + "ExistingDatabaseHost=$EXISTING_DATABASE_HOST" + "ExistingDatabaseAdminUser=$EXISTING_DATABASE_ADMIN_USER" + "ExistingDatabaseAdminPassword=$EXISTING_DATABASE_ADMIN_PASSWORD" + "ExistingDatabaseNetworkMode=$EXISTING_DATABASE_NETWORK_MODE" + ) + if [[ "$EXISTING_DATABASE_NETWORK_MODE" == "private" ]]; then + PARAMS+=( + "ExistingDatabaseSubnetIdsCsv=$EXISTING_DATABASE_SUBNET_IDS_CSV" + "ExistingDatabaseLambdaSecurityGroupId=$EXISTING_DATABASE_LAMBDA_SG_ID" + ) + fi +else + # Clear existing-host parameters on updates to avoid stale previous values. + # SAM rejects Key= (empty value) in shorthand; use ParameterKey=K,ParameterValue= instead. 
+ PARAMS+=( + "ParameterKey=ExistingDatabaseHost,ParameterValue=" + "ParameterKey=ExistingDatabaseAdminUser,ParameterValue=" + "ParameterKey=ExistingDatabaseAdminPassword,ParameterValue=" + "ExistingDatabaseNetworkMode=public" + "ParameterKey=ExistingDatabaseSubnetIdsCsv,ParameterValue=" + "ParameterKey=ExistingDatabaseLambdaSecurityGroupId,ParameterValue=" + ) +fi + +if [[ -n "$TOKEN_OVERRIDE" ]]; then + PARAMS+=("TokenEncryptionKeyOverride=$TOKEN_OVERRIDE") +fi +if [[ -n "$APP_DB_PASSWORD_OVERRIDE" ]]; then + PARAMS+=("AppDbPasswordOverride=$APP_DB_PASSWORD_OVERRIDE") +fi +if [[ -n "$EXISTING_TOKEN_SECRET_ARN" ]]; then + PARAMS+=("ExistingTokenEncryptionKeySecretArn=$EXISTING_TOKEN_SECRET_ARN") +fi + +echo "=== SAM Deploy ===" +echo "Deploying stack..." +sam deploy \ + -t .aws-sam/build/template.yaml \ + --stack-name "$STACK_NAME" \ + --s3-bucket "$S3_BUCKET" \ + --capabilities CAPABILITY_IAM \ + --region "$REGION" \ + --no-fail-on-empty-changeset \ + --parameter-overrides "${PARAMS[@]}" + +APP_OUTPUTS="$(app_describe_outputs "$STACK_NAME" "$REGION")" + +else + echo + echo "Skipping Build/Deploy (task 2 not selected)." 
+ APP_OUTPUTS="${EXISTING_STACK_OUTPUTS:-}" + DB_MODE="1" + if [[ "$PREV_STACK_USES_EXISTING_DB" == "true" ]]; then + DB_MODE="2" + fi + DATABASE_SCHEMA="${PREV_DATABASE_SCHEMA:-}" + [[ -z "$DATABASE_SCHEMA" ]] && DATABASE_SCHEMA="syncbot_${STAGE}" + DATABASE_ENGINE="${PREV_DATABASE_ENGINE:-mysql}" + [[ -z "$DATABASE_ENGINE" ]] && DATABASE_ENGINE="mysql" + EXISTING_DATABASE_HOST="${PREV_EXISTING_DATABASE_HOST:-}" + EXISTING_DATABASE_ADMIN_USER="${PREV_EXISTING_DATABASE_ADMIN_USER:-}" + EXISTING_DATABASE_ADMIN_PASSWORD="${EXISTING_DATABASE_ADMIN_PASSWORD:-}" + EXISTING_DATABASE_NETWORK_MODE="${PREV_EXISTING_DATABASE_NETWORK_MODE:-public}" + EXISTING_DATABASE_SUBNET_IDS_CSV="${PREV_EXISTING_DATABASE_SUBNET_IDS_CSV:-}" + EXISTING_DATABASE_LAMBDA_SG_ID="${PREV_EXISTING_DATABASE_LAMBDA_SG_ID:-}" + SLACK_SIGNING_SECRET="${SLACK_SIGNING_SECRET:-}" + SLACK_CLIENT_SECRET="${SLACK_CLIENT_SECRET:-}" + SLACK_CLIENT_ID="${SLACK_CLIENT_ID:-}" + TOKEN_SECRET_NAME="syncbot-${STAGE}-token-encryption-key" + APP_DB_SECRET_NAME="syncbot-${STAGE}-app-db-password" + TOKEN_OVERRIDE="" + EXISTING_TOKEN_SECRET_ARN="" + RECEIPT_TOKEN_SECRET_ID="" + RECEIPT_APP_DB_SECRET_NAME="" + TOKEN_SECRET_ID="" + TOKEN_SECRET_VALUE="" + APP_DB_SECRET_VALUE="" +fi + +SYNCBOT_API_URL="$(output_value "$APP_OUTPUTS" "SyncBotApiUrl")" +SYNCBOT_INSTALL_URL="$(output_value "$APP_OUTPUTS" "SyncBotInstallUrl")" + +echo +echo "=== Post-Deploy ===" +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo "Deploy complete." 
+fi + +if [[ "$TASK_SLACK_API" == "true" || "$TASK_BUILD_DEPLOY" == "true" ]]; then + generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" +fi + +if [[ "$TASK_SLACK_API" == "true" ]] && [[ -n "${SLACK_MANIFEST_GENERATED_PATH:-}" ]]; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" +fi + +if [[ "$TASK_CICD" == "true" ]]; then + configure_github_actions_aws \ + "$BOOTSTRAP_OUTPUTS" \ + "$BOOTSTRAP_STACK" \ + "$REGION" \ + "$STACK_NAME" \ + "$STAGE" \ + "$DATABASE_SCHEMA" \ + "$DB_MODE" \ + "$EXISTING_DATABASE_HOST" \ + "$EXISTING_DATABASE_ADMIN_USER" \ + "$EXISTING_DATABASE_ADMIN_PASSWORD" \ + "$EXISTING_DATABASE_NETWORK_MODE" \ + "$EXISTING_DATABASE_SUBNET_IDS_CSV" \ + "$EXISTING_DATABASE_LAMBDA_SG_ID" \ + "$DATABASE_ENGINE" +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + # Prepare secret metadata/value so receipt and final backup output stay in sync. + if [[ -n "${TOKEN_OVERRIDE:-}" ]]; then + RECEIPT_TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + TOKEN_SECRET_ID="TokenEncryptionKeyOverride" + TOKEN_SECRET_VALUE="$TOKEN_OVERRIDE" + else + TOKEN_SECRET_ID="${TOKEN_SECRET_NAME:-}" + if [[ -n "${EXISTING_TOKEN_SECRET_ARN:-}" ]]; then + TOKEN_SECRET_ID="$EXISTING_TOKEN_SECRET_ARN" + fi + TOKEN_SECRET_VALUE="$(secret_value_by_id "$TOKEN_SECRET_ID" "$REGION" 2>/dev/null || true)" + RECEIPT_TOKEN_SECRET_ID="$TOKEN_SECRET_ID" + fi + APP_DB_SECRET_VALUE="$(secret_value_by_id "$APP_DB_SECRET_NAME" "$REGION" 2>/dev/null || true)" + RECEIPT_APP_DB_SECRET_NAME="$APP_DB_SECRET_NAME" +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo + echo "=== Deploy Receipt ===" + write_deploy_receipt \ + "aws" \ + "$STAGE" \ + "$STACK_NAME" \ + "$REGION" \ + "$SYNCBOT_API_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" +fi + +if [[ "$TASK_BACKUP_SECRETS" == "true" ]]; then + echo + echo "=== Backup Secrets (Disaster Recovery) ===" + # IMPORTANT: 
When Backup Secrets is selected, print plaintext backup secrets here. + # Do not remove/redact this section; operators rely on it for DR copy-out. + echo "Copy these values now and store them in your secure disaster-recovery vault." + + echo "- TOKEN_ENCRYPTION_KEY source: ${TOKEN_SECRET_ID:-}" + if [[ -n "${TOKEN_SECRET_VALUE:-}" && "$TOKEN_SECRET_VALUE" != "None" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" + else + echo " TOKEN_ENCRYPTION_KEY: " + fi + + echo "- DATABASE_PASSWORD source: ${APP_DB_SECRET_NAME:-}" + if [[ -n "${APP_DB_SECRET_VALUE:-}" && "$APP_DB_SECRET_VALUE" != "None" ]]; then + echo " DATABASE_PASSWORD: $APP_DB_SECRET_VALUE" + else + echo " DATABASE_PASSWORD: " + fi +fi diff --git a/infra/aws/scripts/print-bootstrap-outputs.sh b/infra/aws/scripts/print-bootstrap-outputs.sh new file mode 100755 index 0000000..51f8176 --- /dev/null +++ b/infra/aws/scripts/print-bootstrap-outputs.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Print SyncBot AWS bootstrap stack outputs for GitHub variables or local config. +# Run from repo root: infra/aws/scripts/print-bootstrap-outputs.sh +# Optional env: BOOTSTRAP_STACK_NAME (default syncbot-bootstrap), AWS_REGION (default us-east-2). +# +# Flow: describe-stack (key/value) -> raw lines -> suggested GitHub variable names. + +set -euo pipefail + +STACK_NAME="${BOOTSTRAP_STACK_NAME:-syncbot-bootstrap}" +REGION="${AWS_REGION:-us-east-2}" + +echo "=== Bootstrap Stack Outputs ===" +echo "Bootstrap stack: $STACK_NAME (region: $REGION)" +echo "" + +outputs=$(aws cloudformation describe-stacks \ + --stack-name "$STACK_NAME" \ + --query 'Stacks[0].Outputs[*].[OutputKey,OutputValue]' \ + --output text \ + --region "$REGION" 2>/dev/null) || { + echo "Error: Could not describe stack '$STACK_NAME' in $REGION. Is the bootstrap stack deployed?" 
>&2 + exit 1 +} + +while read -r key value; do + echo "$key = $value" +done <<< "$outputs" + +echo "" +echo "=== Suggested GitHub Actions Variables ===" +echo "AWS_ROLE_TO_ASSUME = $(echo "$outputs" | awk -F'\t' '$1=="GitHubDeployRoleArn"{print $2}')" +echo "AWS_S3_BUCKET = $(echo "$outputs" | awk -F'\t' '$1=="DeploymentBucketName"{print $2}') (SAM/CI packaging for sam deploy — not Slack or app media)" +echo "AWS_REGION = $(echo "$outputs" | awk -F'\t' '$1=="BootstrapRegion"{print $2}')" +echo "" +echo "Next: deploy the app stack (sam deploy) and set the remaining GitHub vars/secrets." +echo "TOKEN_ENCRYPTION_KEY is created by the app stack on first deploy — back it up then (see docs/DEPLOYMENT.md)." diff --git a/infra/aws/template.bootstrap.yaml b/infra/aws/template.bootstrap.yaml new file mode 100644 index 0000000..5c19c82 --- /dev/null +++ b/infra/aws/template.bootstrap.yaml @@ -0,0 +1,279 @@ +# Bootstrap stack for SyncBot deployments. +# Deploy once with admin credentials; creates OIDC role, deploy bucket, and +# least-privilege policy for GitHub Actions and optional local deploy use. +# No SAM Transform — plain CloudFormation (IAM + S3). +AWSTemplateFormatVersion: "2010-09-09" +Description: > + SyncBot bootstrap: GitHub OIDC deploy role, deployment artifact bucket, + and least-privilege policy. Deploy once locally, then use outputs in GitHub + and for future local deploys. + +Parameters: + GitHubRepository: + Type: String + Description: > + GitHub repository in form owner/repo (e.g. myorg/syncbot). + Used to scope OIDC trust so only this repo can assume the deploy role. + CreateOIDCProvider: + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + Description: > + Set to false if an OIDC provider for token.actions.githubusercontent.com + already exists in this account (e.g. created by another stack). + DeploymentBucketPrefix: + Type: String + Default: "syncbot-deploy" + Description: > + Prefix for the deployment artifact bucket. 
The bucket resource name is + DeploymentBucketPrefix + account ID + region (see DeploymentBucket in Resources). + +Conditions: + CreateOIDC: !Equals [!Ref CreateOIDCProvider, "true"] + +Resources: + # ------------------------------------------------------------------------- + # GitHub OIDC identity provider (one per account) + # ------------------------------------------------------------------------- + GitHubOIDCProvider: + Type: AWS::IAM::OIDCProvider + Condition: CreateOIDC + Properties: + Url: https://token.actions.githubusercontent.com + ClientIdList: + - sts.amazonaws.com + ThumbprintList: + - 6938fd4d98bab03faadb97b34396831e3780aea1 + + # ------------------------------------------------------------------------- + # Deployment artifact bucket for SAM/CloudFormation packages + # ------------------------------------------------------------------------- + DeploymentBucket: + Type: AWS::S3::Bucket + Properties: + BucketName: !Sub "${DeploymentBucketPrefix}-${AWS::AccountId}-${AWS::Region}" + PublicAccessBlockConfiguration: + BlockPublicAcls: true + BlockPublicPolicy: true + IgnorePublicAcls: true + RestrictPublicBuckets: true + LifecycleConfiguration: + Rules: + - Id: ExpireOldArtifacts + Status: Enabled + ExpirationInDays: 30 + NoncurrentVersionExpiration: + NoncurrentDays: 7 + + # ------------------------------------------------------------------------- + # IAM role for GitHub Actions (and optional local assume-role) + # ------------------------------------------------------------------------- + GitHubDeployRole: + Type: AWS::IAM::Role + Properties: + RoleName: !Sub "syncbot-github-deploy-${AWS::Region}" + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + - Effect: Allow + Principal: + Federated: !If + - CreateOIDC + - !Sub "arn:aws:iam::${AWS::AccountId}:oidc-provider/token.actions.githubusercontent.com" + - !Sub "arn:aws:iam::${AWS::AccountId}:oidc-provider/token.actions.githubusercontent.com" + Action: sts:AssumeRoleWithWebIdentity + 
Condition:
+              StringEquals:
+                token.actions.githubusercontent.com:aud: sts.amazonaws.com
+              StringLike:
+                token.actions.githubusercontent.com:sub: !Sub "repo:${GitHubRepository}:*"
+      ManagedPolicyArns:
+        - !Ref DeployPolicy
+
+  # -------------------------------------------------------------------------
+  # Least-privilege policy for SAM deploy (CloudFormation, S3, IAM PassRole, etc.)
+  # -------------------------------------------------------------------------
+  DeployPolicy:
+    Type: AWS::IAM::ManagedPolicy
+    Properties:
+      ManagedPolicyName: !Sub "syncbot-deploy-policy-${AWS::Region}"
+      PolicyDocument:
+        Version: "2012-10-17"
+        Statement:
+          - Sid: CloudFormation
+            Effect: Allow
+            Action:
+              - cloudformation:CreateStack
+              - cloudformation:UpdateStack
+              - cloudformation:DeleteStack
+              - cloudformation:CreateChangeSet
+              - cloudformation:ExecuteChangeSet
+              - cloudformation:DeleteChangeSet
+              - cloudformation:DescribeChangeSet
+              - cloudformation:ListChangeSets
+              - cloudformation:DescribeStacks
+              - cloudformation:DescribeStackEvents
+              - cloudformation:DescribeStackResources
+              - cloudformation:DescribeStackResource
+              - cloudformation:GetTemplate
+              - cloudformation:GetTemplateSummary
+              - cloudformation:ListStackResources
+              - cloudformation:ValidateTemplate
+            Resource: "*"
+          - Sid: S3DeployBucket
+            Effect: Allow
+            Action:
+              - s3:GetObject
+              - s3:PutObject
+              - s3:DeleteObject
+              - s3:ListBucket
+              - s3:GetBucketLocation
+            Resource:
+              - !GetAtt DeploymentBucket.Arn
+              - !Sub "${DeploymentBucket.Arn}/*"
+          - Sid: IAMCreateSyncBotRole
+            Effect: Allow
+            Action:
+              - iam:CreateRole
+            # NOTE(review): "iam:RoleName" is not a supported IAM condition key,
+            # so the previous Condition could never match and CreateRole was
+            # denied. Scope by role ARN instead, matching IAMManageSyncBotRoles.
+            Resource: !Sub "arn:aws:iam::${AWS::AccountId}:role/syncbot-*"
+          - Sid: IAMManageSyncBotRoles
+            Effect: Allow
+            Action:
+              - iam:PassRole
+              - iam:PutRolePolicy
+              - iam:AttachRolePolicy
+              - iam:GetRole
+              - iam:DeleteRole
+              - iam:DeleteRolePolicy
+              - iam:DetachRolePolicy
+            Resource:
+              - !Sub "arn:aws:iam::${AWS::AccountId}:role/syncbot-*"
+          - Sid: Lambda
+            Effect: Allow
+            Action:
+              - lambda:CreateFunction
+              - 
lambda:UpdateFunctionCode + - lambda:UpdateFunctionConfiguration + - lambda:GetFunction + - lambda:DeleteFunction + - lambda:AddPermission + - lambda:RemovePermission + - lambda:PublishVersion + Resource: !Sub "arn:aws:lambda:${AWS::Region}:${AWS::AccountId}:function:syncbot-*" + - Sid: ApiGateway + Effect: Allow + Action: + - apigateway:* + Resource: "*" + - Sid: Logs + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:PutRetentionPolicy + - logs:DescribeLogGroups + - logs:DeleteLogGroup + Resource: !Sub "arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/syncbot-*" + - Sid: EC2Networking + Effect: Allow + Action: + - ec2:CreateVpc + - ec2:DeleteVpc + - ec2:DescribeVpcs + - ec2:CreateSubnet + - ec2:DeleteSubnet + - ec2:DescribeSubnets + - ec2:CreateInternetGateway + - ec2:DeleteInternetGateway + - ec2:AttachInternetGateway + - ec2:DetachInternetGateway + - ec2:CreateRouteTable + - ec2:DeleteRouteTable + - ec2:CreateRoute + - ec2:DeleteRoute + - ec2:AssociateRouteTable + - ec2:DisassociateRouteTable + - ec2:CreateSecurityGroup + - ec2:DeleteSecurityGroup + - ec2:DescribeSecurityGroups + - ec2:DescribeRouteTables + - ec2:DescribeInternetGateways + - ec2:DescribeVpcAttribute + - ec2:ModifyVpcAttribute + - ec2:CreateTags + - ec2:DeleteTags + - ec2:DescribeTags + Resource: "*" + - Sid: RDS + Effect: Allow + Action: + - rds:CreateDBInstance + - rds:DeleteDBInstance + - rds:ModifyDBInstance + - rds:DescribeDBInstances + - rds:CreateDBSubnetGroup + - rds:DeleteDBSubnetGroup + - rds:DescribeDBSubnetGroups + - rds:CreateDBParameterGroup + - rds:DeleteDBParameterGroup + - rds:DescribeDBParameters + - rds:AddTagsToResource + - rds:RemoveTagsFromResource + - rds:ListTagsForResource + Resource: "*" + - Sid: CloudWatchAlarms + Effect: Allow + Action: + - cloudwatch:PutMetricAlarm + - cloudwatch:DeleteAlarms + - cloudwatch:DescribeAlarms + Resource: "*" + - Sid: Events + Effect: Allow + Action: + - events:PutRule + - events:DeleteRule + - events:PutTargets 
+ - events:RemoveTargets + - events:DescribeRule + - events:ListTargetsByRule + Resource: "*" + - Sid: SecretsManagerSyncBot + Effect: Allow + Action: + - secretsmanager:GetSecretValue + - secretsmanager:DescribeSecret + - secretsmanager:CreateSecret + - secretsmanager:UpdateSecret + - secretsmanager:DeleteSecret + - secretsmanager:TagResource + - secretsmanager:UntagResource + - secretsmanager:ListSecretVersionIds + Resource: !Sub "arn:aws:secretsmanager:${AWS::Region}:${AWS::AccountId}:secret:syncbot-*" + +Outputs: + GitHubDeployRoleArn: + Description: ARN of the role for GitHub Actions to assume (set as AWS_ROLE_TO_ASSUME). + Value: !GetAtt GitHubDeployRole.Arn + Export: + Name: !Sub "${AWS::StackName}-GitHubDeployRoleArn" + DeploymentBucketName: + Description: Name of the S3 bucket for SAM deploy artifacts (set as AWS_S3_BUCKET). + Value: !Ref DeploymentBucket + Export: + Name: !Sub "${AWS::StackName}-DeploymentBucketName" + BootstrapRegion: + Description: Region where bootstrap and app stacks are deployed. + Value: !Ref AWS::Region + Export: + Name: !Sub "${AWS::StackName}-BootstrapRegion" + SuggestedTestStackName: + Description: Suggested stack name for test environment. + Value: syncbot-test + SuggestedProdStackName: + Description: Suggested stack name for prod environment. + Value: syncbot-prod diff --git a/infra/aws/template.yaml b/infra/aws/template.yaml new file mode 100644 index 0000000..dc57999 --- /dev/null +++ b/infra/aws/template.yaml @@ -0,0 +1,899 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: > + SyncBot - Slack app that syncs posts and replies across workspaces. + Free-tier compatible: Lambda, API Gateway, RDS PostgreSQL or MySQL (db.t4g.micro). + OAuth and app data use RDS; media is uploaded directly to Slack. + SAM deploy uses an S3 artifact bucket for packaging only (not runtime). + Template lives under infra/aws; CodeUri points at repo-root syncbot/ (includes db/alembic for migrations). 
+ +Globals: + Function: + Timeout: 10 + MemorySize: 128 + Tracing: Active + Api: + TracingEnabled: true + MethodSettings: + - ResourcePath: "/*" + HttpMethod: "*" + ThrottlingBurstLimit: 20 + ThrottlingRateLimit: 10 + +# ================================================================ +# Parameters +# ================================================================ +# Grouping: Stage/engine → Slack (maps to SLACK_* env) → RDS / VPC → +# secrets/overrides (TOKEN_ENCRYPTION_KEY, DB password) → RequireAdmin. +# See each Description for the runtime env name where applicable. + +Parameters: + Stage: + Description: Deployment stage + Type: String + Default: test + AllowedValues: + - test + - prod + + DatabaseEngine: + Description: > + SQL engine for new or existing RDS database host. + Supported engines: mysql or postgresql. Default is mysql. + Type: String + Default: mysql + AllowedValues: + - postgresql + - mysql + + # --- Slack --- + + SlackSigningSecret: + Description: Slack signing secret for request verification (SLACK_SIGNING_SECRET) + Type: String + NoEcho: true + + SlackClientID: + Description: > + Slack OAuth app Client ID (SLACK_CLIENT_ID; Basic Information → App Credentials). + Required for your Slack app; use the ID from the app you created for this deploy. + Type: String + + SlackClientSecret: + Description: Slack OAuth client secret (SLACK_CLIENT_SECRET) + Type: String + NoEcho: true + + SlackOauthBotScopes: + Description: Comma-separated list of Slack OAuth bot scopes (SLACK_BOT_SCOPES) + Type: String + Default: "app_mentions:read,channels:history,channels:join,channels:read,channels:manage,chat:write,chat:write.customize,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" + + SlackOauthUserScopes: + Description: > + Comma-separated Slack OAuth user scopes (SLACK_USER_SCOPES). 
Must match slack-manifest.json + oauth_config.scopes.user and syncbot/slack_manifest_scopes.py USER_SCOPES (same order). + Type: String + Default: "chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" + + # --- Database (RDS) --- + + ExistingDatabaseHost: + Description: > + Endpoint of an existing RDS instance (e.g. mydb.xxxx.us-east-2.rds.amazonaws.com). + Leave EMPTY to create a new RDS instance. When set, all VPC and RDS + resources are skipped and the deploy creates the schema and app user for you. + Type: String + Default: "" + + ExistingDatabaseAdminUser: + Description: > + Database admin user that can create databases and users (e.g. RDS master). Used only when + ExistingDatabaseHost is set; the deploy creates a dedicated app user and schema. + Type: String + Default: "" + + ExistingDatabaseAdminPassword: + Description: Password for ExistingDatabaseAdminUser. Used only when using existing host. + Type: String + NoEcho: true + Default: "" + + ExistingDatabaseNetworkMode: + Description: > + Network mode for existing database host. Use "public" when the existing RDS + endpoint is reachable from the public Internet. Use "private" when it is only + reachable from within a VPC. + Type: String + Default: public + AllowedValues: + - public + - private + + ExistingDatabaseSubnetIdsCsv: + Description: > + Comma-separated subnet IDs for Lambda VPC attachment when using an existing + private database host (for example "subnet-aaa,subnet-bbb"). + Ignored unless ExistingDatabaseHost is set and ExistingDatabaseNetworkMode=private. + Type: String + Default: "" + + ExistingDatabaseLambdaSecurityGroupId: + Description: > + Security group ID for Lambda VPC attachment when using an existing private + database host. This security group must be allowed to connect to the DB (3306 MySQL, 5432 PostgreSQL). 
+ Ignored unless ExistingDatabaseHost is set and ExistingDatabaseNetworkMode=private. + Type: String + Default: "" + + DatabaseSchema: + Description: > + Database/schema name for MySQL or PostgreSQL. Each app sharing an RDS instance + should use a different schema name. + Type: String + Default: "syncbot" + + DatabaseInstanceClass: + Description: "RDS instance class (db.t4g.micro is free-tier eligible). Ignored when using an existing database." + Type: String + Default: db.t4g.micro + AllowedValues: + - db.t4g.micro + + DatabaseBackupRetentionDays: + Description: > + Automated backup retention days for new RDS. Default 0 is the most + free-tier-friendly setting and avoids limits on free-plan accounts. + Increase only if your account plan supports automated backups. + Type: Number + Default: 0 + AllowedValues: + - 0 + - 1 + - 7 + + AllowedDBCidr: + Description: > + CIDR allowed to reach the database (e.g. your IP as x.x.x.x/32). + Ignored when using an existing database. + Type: String + Default: "0.0.0.0/0" + + VpcCidr: + Description: CIDR block for the VPC. Ignored when using an existing database. + Type: String + Default: "10.0.0.0/16" + + # --- Secrets and optional overrides (disaster recovery / recreate) --- + + TokenEncryptionKeyOverride: + Description: > + Optional disaster-recovery override for TOKEN_ENCRYPTION_KEY. + Use only when restoring an existing deployment with a known key. + Leave empty for normal deploys (auto-generated Secret Manager key is used). + Type: String + NoEcho: true + Default: "" + + ExistingTokenEncryptionKeySecretArn: + Description: > + Optional existing Secrets Manager secret ARN containing TOKEN_ENCRYPTION_KEY. + Use this when the secret already exists (for example after a failed create/delete cycle) + to avoid secret name collisions. + Type: String + Default: "" + + AppDbPasswordOverride: + Description: > + Optional app DB password override used only when recreating the app DB secret. 
+ Leave empty for normal deploys to auto-generate. + Type: String + NoEcho: true + Default: "" + + RequireAdmin: + Description: > + When "true" (default), only workspace admins and owners can + configure SyncBot. Set to "false" to allow any user. + Type: String + Default: "true" + AllowedValues: + - "true" + - "false" + + LogLevel: + Description: > + Python logging level for the app (LOG_LEVEL). DEBUG, INFO, WARNING, ERROR, or CRITICAL. + Type: String + Default: INFO + AllowedValues: + - DEBUG + - INFO + - WARNING + - ERROR + - CRITICAL + + SoftDeleteRetentionDays: + Description: Days to retain soft-deleted workspace data (SOFT_DELETE_RETENTION_DAYS). + Type: Number + Default: 30 + MinValue: 1 + + SyncbotFederationEnabled: + Description: Set to "true" to enable external connections / federation (SYNCBOT_FEDERATION_ENABLED). + Type: String + Default: "false" + AllowedValues: + - "true" + - "false" + + SyncbotInstanceId: + Description: > + Optional stable UUID for this instance (SYNCBOT_INSTANCE_ID). Leave empty to auto-generate at runtime. + Type: String + Default: "" + + SyncbotPublicUrl: + Description: > + Public HTTPS base URL without path (SYNCBOT_PUBLIC_URL). Required when + federation is enabled (SyncbotFederationEnabled=true); use the API Gateway + stage URL from stack outputs or a custom domain. Leave empty otherwise. + Type: String + Default: "" + + PrimaryWorkspace: + Description: > + Slack Team ID for PRIMARY_WORKSPACE. Required for backup/restore to appear; also scopes DB reset. Leave empty to hide backup/restore. + Type: String + Default: "" + + EnableDbReset: + Description: > + Set to "true" to enable Reset Database when PRIMARY_WORKSPACE matches (ENABLE_DB_RESET). Leave empty to disable. + Legacy values (e.g. a Slack Team ID) are ignored by the app until updated to "true". + Type: String + Default: "" + + DatabaseTlsEnabled: + Description: > + Optional DATABASE_TLS_ENABLED. Empty = use app default (TLS on outside local dev). 
+ Set "true" or "false" to override. + Type: String + Default: "" + AllowedValues: + - "" + - "true" + - "false" + + DatabaseSslCaPath: + Description: > + Optional CA bundle path when DB TLS is on (DATABASE_SSL_CA_PATH). Empty = app default. + Type: String + Default: "" + +# ================================================================ +# Conditions +# ================================================================ + +Conditions: + CreateDatabase: !Equals [!Ref ExistingDatabaseHost, ""] + UseExistingDatabase: !Not [!Equals [!Ref ExistingDatabaseHost, ""]] + IsMysqlEngine: !Equals [!Ref DatabaseEngine, mysql] + IsPostgresqlEngine: !Equals [!Ref DatabaseEngine, postgresql] + CreateDatabaseMysql: !And [!Condition CreateDatabase, !Condition IsMysqlEngine] + CreateDatabasePostgresql: !And [!Condition CreateDatabase, !Condition IsPostgresqlEngine] + UseAutomatedBackups: !Not [!Equals [!Ref DatabaseBackupRetentionDays, 0]] + UseExistingDatabasePrivateVpc: !And + - !Condition UseExistingDatabase + - !Equals [!Ref ExistingDatabaseNetworkMode, "private"] + HasTokenEncryptionKeyOverride: !Not [!Equals [!Ref TokenEncryptionKeyOverride, ""]] + HasExistingTokenEncryptionKeySecretArn: !Not [!Equals [!Ref ExistingTokenEncryptionKeySecretArn, ""]] + HasAppDbPasswordOverride: !Not [!Equals [!Ref AppDbPasswordOverride, ""]] + HasNoAppDbPasswordOverride: !Not [!Condition HasAppDbPasswordOverride] + HasSyncbotPublicUrlOverride: !Not [!Equals [!Ref SyncbotPublicUrl, ""]] + HasPrimaryWorkspace: !Not [!Equals [!Ref PrimaryWorkspace, ""]] + HasEnableDbReset: !Equals [!Ref EnableDbReset, "true"] + HasDatabaseTlsExplicit: !Not [!Equals [!Ref DatabaseTlsEnabled, ""]] + HasDatabaseSslCaPath: !Not [!Equals [!Ref DatabaseSslCaPath, ""]] + CreateTokenEncryptionKeySecret: !And + - !Not [!Condition HasTokenEncryptionKeyOverride] + - !Not [!Condition HasExistingTokenEncryptionKeySecretArn] +Mappings: + StagesMap: + test: + KeepWarmName: "SyncBotKeepWarmTest" + prod: + KeepWarmName: 
"SyncBotKeepWarmProd" + +Resources: + # ============================================================ + # Networking + # ============================================================ + + VPC: + Type: AWS::EC2::VPC + Condition: CreateDatabase + Properties: + CidrBlock: !Ref VpcCidr + EnableDnsHostnames: true + EnableDnsSupport: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-vpc" + + InternetGateway: + Type: AWS::EC2::InternetGateway + Condition: CreateDatabase + Properties: + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-igw" + + VPCGatewayAttachment: + Type: AWS::EC2::VPCGatewayAttachment + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + InternetGatewayId: !Ref InternetGateway + + PublicSubnet1: + Type: AWS::EC2::Subnet + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + CidrBlock: !Select [0, !Cidr [!Ref VpcCidr, 4, 8]] + AvailabilityZone: !Select [0, !GetAZs ""] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-1" + + PublicSubnet2: + Type: AWS::EC2::Subnet + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + CidrBlock: !Select [1, !Cidr [!Ref VpcCidr, 4, 8]] + AvailabilityZone: !Select [1, !GetAZs ""] + MapPublicIpOnLaunch: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-2" + + PublicRouteTable: + Type: AWS::EC2::RouteTable + Condition: CreateDatabase + Properties: + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-public-rt" + + PublicRoute: + Type: AWS::EC2::Route + Condition: CreateDatabase + DependsOn: VPCGatewayAttachment + Properties: + RouteTableId: !Ref PublicRouteTable + DestinationCidrBlock: "0.0.0.0/0" + GatewayId: !Ref InternetGateway + + PublicSubnet1RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Condition: CreateDatabase + Properties: + SubnetId: !Ref PublicSubnet1 + RouteTableId: !Ref PublicRouteTable + + PublicSubnet2RouteTableAssociation: + Type: AWS::EC2::SubnetRouteTableAssociation + Condition: 
CreateDatabase + Properties: + SubnetId: !Ref PublicSubnet2 + RouteTableId: !Ref PublicRouteTable + + RDSSecurityGroup: + Type: AWS::EC2::SecurityGroup + Condition: CreateDatabase + Properties: + GroupDescription: Controls access to the SyncBot RDS instance + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 3306 + ToPort: 3306 + CidrIp: !Ref AllowedDBCidr + Description: "MySQL (if DatabaseEngine=mysql)" + - IpProtocol: tcp + FromPort: 5432 + ToPort: 5432 + CidrIp: !Ref AllowedDBCidr + Description: "PostgreSQL (if DatabaseEngine=postgresql)" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-rds-sg" + + # ============================================================ + # RDS MySQL / PostgreSQL + # ============================================================ + + RDSParameterGroupMysql: + Type: AWS::RDS::DBParameterGroup + Condition: CreateDatabaseMysql + Properties: + Family: mysql8.0 + Description: !Sub "SyncBot ${Stage} MySQL - SSL" + Parameters: + require_secure_transport: "1" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-params-mysql" + + RDSParameterGroupPostgres: + Type: AWS::RDS::DBParameterGroup + Condition: CreateDatabasePostgresql + Properties: + Family: postgres16 + Description: !Sub "SyncBot ${Stage} PostgreSQL" + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-params-pg" + + DBSubnetGroup: + Type: AWS::RDS::DBSubnetGroup + Condition: CreateDatabase + Properties: + DBSubnetGroupDescription: Subnet group for SyncBot RDS + SubnetIds: + - !Ref PublicSubnet1 + - !Ref PublicSubnet2 + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-subnet-group" + + RDSInstanceMysql: + Type: AWS::RDS::DBInstance + Condition: CreateDatabaseMysql + DeletionPolicy: Snapshot + UpdateReplacePolicy: Snapshot + Properties: + DBInstanceIdentifier: !Sub "syncbot-${Stage}-mysql" + DBInstanceClass: !Ref DatabaseInstanceClass + Engine: mysql + # Minor version must match cfn-lint / RDS allowed list (major-only "8.0" fails E3691) + 
EngineVersion: "8.0.40" + MasterUsername: !Sub "syncbot_admin_${Stage}" + ManageMasterUserPassword: true + DBName: !Ref DatabaseSchema + AllocatedStorage: 20 + StorageType: gp2 + StorageEncrypted: true + PubliclyAccessible: true + MultiAZ: false + DBSubnetGroupName: !Ref DBSubnetGroup + DBParameterGroupName: !Ref RDSParameterGroupMysql + VPCSecurityGroups: + - !Ref RDSSecurityGroup + BackupRetentionPeriod: !Ref DatabaseBackupRetentionDays + PreferredBackupWindow: !If + - UseAutomatedBackups + - "03:00-04:00" + - !Ref AWS::NoValue + PreferredMaintenanceWindow: "sun:04:00-sun:05:00" + DeletionProtection: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-mysql" + + RDSInstancePostgres: + Type: AWS::RDS::DBInstance + Condition: CreateDatabasePostgresql + DeletionPolicy: Snapshot + UpdateReplacePolicy: Snapshot + Properties: + DBInstanceIdentifier: !Sub "syncbot-${Stage}-pg" + DBInstanceClass: !Ref DatabaseInstanceClass + Engine: postgres + EngineVersion: "16.6" + MasterUsername: !Sub "syncbot_admin_${Stage}" + ManageMasterUserPassword: true + DBName: !Ref DatabaseSchema + AllocatedStorage: 20 + StorageType: gp2 + StorageEncrypted: true + PubliclyAccessible: true + MultiAZ: false + DBSubnetGroupName: !Ref DBSubnetGroup + DBParameterGroupName: !Ref RDSParameterGroupPostgres + VPCSecurityGroups: + - !Ref RDSSecurityGroup + BackupRetentionPeriod: !Ref DatabaseBackupRetentionDays + PreferredBackupWindow: !If + - UseAutomatedBackups + - "03:00-04:00" + - !Ref AWS::NoValue + PreferredMaintenanceWindow: "sun:04:00-sun:05:00" + DeletionProtection: true + Tags: + - Key: Name + Value: !Sub "syncbot-${Stage}-db-pg" + + TokenEncryptionKeySecret: + Type: AWS::SecretsManager::Secret + Condition: CreateTokenEncryptionKeySecret + DeletionPolicy: Retain + UpdateReplacePolicy: Retain + Properties: + Name: !Sub "syncbot-${Stage}-token-encryption-key" + Description: !Sub "SyncBot ${Stage} token encryption key (backup required)" + GenerateSecretString: + PasswordLength: 48 + 
ExcludePunctuation: true + IncludeSpace: false + + # --- DB setup: generated app password and setup Lambda --- + AppDbCredentialsSecretGenerated: + Type: AWS::SecretsManager::Secret + Condition: HasNoAppDbPasswordOverride + Properties: + Name: !Sub "syncbot-${Stage}-app-db-password" + Description: !Sub "SyncBot ${Stage} app DB user password (created by stack)" + GenerateSecretString: + PasswordLength: 32 + ExcludePunctuation: true + IncludeSpace: false + + AppDbCredentialsSecretProvided: + Type: AWS::SecretsManager::Secret + Condition: HasAppDbPasswordOverride + Properties: + Name: !Sub "syncbot-${Stage}-app-db-password" + Description: !Sub "SyncBot ${Stage} app DB user password (provided override)" + SecretString: !Ref AppDbPasswordOverride + + DbSetupFunction: + Type: AWS::Serverless::Function + Metadata: + BuildMethod: makefile + cfn-lint: + config: + ignore_checks: + # Parameter ExistingDatabaseLambdaSecurityGroupId is a valid sg-* at deploy time + - W1030 + Properties: + CodeUri: db_setup/ + Handler: handler.handler + Runtime: python3.12 + Architectures: + - x86_64 + Timeout: 60 + MemorySize: 256 + Policies: + - AWSLambdaVPCAccessExecutionRole + - Version: "2012-10-17" + Statement: + - Effect: Allow + Action: secretsmanager:GetSecretValue + Resource: + - !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated + - !If + - UseExistingDatabase + - !Ref AWS::NoValue + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.MasterUserSecret.SecretArn + - !GetAtt RDSInstancePostgres.MasterUserSecret.SecretArn + VpcConfig: !If + - UseExistingDatabasePrivateVpc + - SubnetIds: !Split [",", !Ref ExistingDatabaseSubnetIdsCsv] + SecurityGroupIds: + - !Ref ExistingDatabaseLambdaSecurityGroupId + - !Ref AWS::NoValue + + AppDbSetup: + Type: Custom::ExistingRDSSetup + Properties: + ServiceToken: !GetAtt DbSetupFunction.Arn + Host: !If + - UseExistingDatabase + - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - 
!GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address + AdminUser: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAdminUser + - !Sub "syncbot_admin_${Stage}" + AdminPassword: !If + - UseExistingDatabase + - !Ref ExistingDatabaseAdminPassword + - "" + AdminSecretArn: !If + - UseExistingDatabase + - "" + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.MasterUserSecret.SecretArn + - !GetAtt RDSInstancePostgres.MasterUserSecret.SecretArn + Schema: !Ref DatabaseSchema + Stage: !Ref Stage + SecretArn: !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated + DatabaseEngine: !Ref DatabaseEngine + + # ============================================================ + # Lambda Function + # ============================================================ + + SyncBotFunction: + Type: AWS::Serverless::Function + Metadata: + cfn-lint: + config: + ignore_checks: + - W1030 + Properties: + CodeUri: ../../syncbot/ + Handler: app.handler + Runtime: python3.12 + Architectures: + - x86_64 + Timeout: 30 + MemorySize: 128 + Policies: + - AWSLambdaVPCAccessExecutionRole + VpcConfig: !If + - UseExistingDatabasePrivateVpc + - SubnetIds: !Split [",", !Ref ExistingDatabaseSubnetIdsCsv] + SecurityGroupIds: + - !Ref ExistingDatabaseLambdaSecurityGroupId + - !Ref AWS::NoValue + Events: + SyncBot: + Type: Api + Properties: + Path: /slack/events + Method: post + SyncBotInstall: + Type: Api + Properties: + Path: /slack/install + Method: get + SyncBotOAuthRedirect: + Type: Api + Properties: + Path: /slack/oauth_redirect + Method: get + SyncBotKeepWarm: + Type: ScheduleV2 + Properties: + ScheduleExpression: "rate(5 minutes)" + Name: !FindInMap + - StagesMap + - !Ref Stage + - KeepWarmName + Environment: + Variables: + SLACK_BOT_TOKEN: "123" + SLACK_SIGNING_SECRET: !Ref SlackSigningSecret + SLACK_CLIENT_SECRET: !Ref SlackClientSecret + SLACK_BOT_SCOPES: !Ref SlackOauthBotScopes + SLACK_USER_SCOPES: !Ref 
SlackOauthUserScopes + SLACK_CLIENT_ID: !Ref SlackClientID + DATABASE_BACKEND: !Ref DatabaseEngine + DATABASE_PORT: !If + - IsMysqlEngine + - "3306" + - "5432" + DATABASE_HOST: !If + - UseExistingDatabase + - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address + DATABASE_USER: !GetAtt AppDbSetup.Username + DATABASE_PASSWORD: !Sub + - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" + - SecretArn: !If + - HasAppDbPasswordOverride + - !Ref AppDbCredentialsSecretProvided + - !Ref AppDbCredentialsSecretGenerated + DATABASE_SCHEMA: !Ref DatabaseSchema + TOKEN_ENCRYPTION_KEY: !If + - HasTokenEncryptionKeyOverride + - !Ref TokenEncryptionKeyOverride + - !If + - HasExistingTokenEncryptionKeySecretArn + - !Sub + - "{{resolve:secretsmanager:${SecretArn}:SecretString}}" + - { SecretArn: !Ref ExistingTokenEncryptionKeySecretArn } + - !Sub "{{resolve:secretsmanager:${TokenEncryptionKeySecret}:SecretString}}" + REQUIRE_ADMIN: !Ref RequireAdmin + LOG_LEVEL: !Ref LogLevel + SOFT_DELETE_RETENTION_DAYS: !Sub "${SoftDeleteRetentionDays}" + SYNCBOT_FEDERATION_ENABLED: !Ref SyncbotFederationEnabled + SYNCBOT_INSTANCE_ID: !Ref SyncbotInstanceId + SYNCBOT_PUBLIC_URL: !Ref SyncbotPublicUrl + PRIMARY_WORKSPACE: !If + - HasPrimaryWorkspace + - !Ref PrimaryWorkspace + - !Ref AWS::NoValue + ENABLE_DB_RESET: !If + - HasEnableDbReset + - "true" + - !Ref AWS::NoValue + DATABASE_TLS_ENABLED: !If + - HasDatabaseTlsExplicit + - !Ref DatabaseTlsEnabled + - !Ref AWS::NoValue + DATABASE_SSL_CA_PATH: !If + - HasDatabaseSslCaPath + - !Ref DatabaseSslCaPath + - !Ref AWS::NoValue + + # Slack Bolt (aws_lambda adapter) runs lazy listeners by invoking this function again + # via lambda:InvokeFunction. The execution role must allow self-invoke. 
+ SyncBotFunctionSelfInvokePolicy: + Type: AWS::IAM::Policy + Properties: + PolicyName: !Sub "syncbot-${Stage}-self-invoke" + Roles: + - !Ref SyncBotFunctionRole + PolicyDocument: + Version: "2012-10-17" + Statement: + - Sid: AllowSelfInvokeForBoltLazyListeners + Effect: Allow + Action: + - lambda:InvokeFunction + Resource: !GetAtt SyncBotFunction.Arn + + # ============================================================ + # CloudWatch Alarms + # ============================================================ + + LambdaErrorAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-errors" + AlarmDescription: > + Fires when the SyncBot Lambda function produces 3 or more + errors within a 5-minute window. + Namespace: AWS/Lambda + MetricName: Errors + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 3 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + LambdaThrottleAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-throttles" + AlarmDescription: > + Fires when the SyncBot Lambda function is throttled. + Namespace: AWS/Lambda + MetricName: Throttles + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 1 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + LambdaDurationAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-lambda-duration" + AlarmDescription: > + Fires when average Lambda duration exceeds 10 seconds. 
+ Namespace: AWS/Lambda + MetricName: Duration + Dimensions: + - Name: FunctionName + Value: !Ref SyncBotFunction + Statistic: Average + Period: 300 + EvaluationPeriods: 2 + Threshold: 10000 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + + ApiGateway5xxAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub "syncbot-${Stage}-api-5xx" + AlarmDescription: > + Fires when the API Gateway returns 5 or more 5xx errors + within a 5-minute window. + Namespace: AWS/ApiGateway + MetricName: 5XXError + Dimensions: + - Name: ApiName + Value: !Ref ServerlessRestApi + Statistic: Sum + Period: 300 + EvaluationPeriods: 1 + Threshold: 5 + ComparisonOperator: GreaterThanOrEqualToThreshold + TreatMissingData: notBreaching + +# ================================================================ +# Outputs +# ================================================================ + +Outputs: + SyncBotPublicBaseUrl: + Description: Public HTTPS base URL (SYNCBOT_PUBLIC_URL) for Slack and federation + Value: !If + - HasSyncbotPublicUrlOverride + - !Ref SyncbotPublicUrl + - !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod" + + SyncBotApiUrl: + Description: API Gateway endpoint URL + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/events/" + + SyncBotInstallUrl: + Description: Slack app installation URL + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/install" + + SyncBotFunctionArn: + Description: SyncBot Lambda function ARN + Value: !GetAtt SyncBotFunction.Arn + + DatabaseHostInUse: + Description: Database host the Lambda is configured to connect to + Value: !If + - UseExistingDatabase + - !Ref ExistingDatabaseHost + - !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address + + RDSEndpoint: + Condition: CreateDatabase + Description: RDS endpoint (MySQL or 
PostgreSQL) when created by this stack + Value: !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Address + - !GetAtt RDSInstancePostgres.Endpoint.Address + + RDSPort: + Condition: CreateDatabase + Description: RDS port when created by this stack + Value: !If + - IsMysqlEngine + - !GetAtt RDSInstanceMysql.Endpoint.Port + - !GetAtt RDSInstancePostgres.Endpoint.Port + + VpcId: + Condition: CreateDatabase + Description: VPC ID (only when VPC is created by this stack) + Value: !Ref VPC + + TokenEncryptionSecretArn: + Description: Secrets Manager ARN containing TOKEN_ENCRYPTION_KEY (empty when using TokenEncryptionKeyOverride only) + Value: !If + - HasExistingTokenEncryptionKeySecretArn + - !Ref ExistingTokenEncryptionKeySecretArn + - !If + - CreateTokenEncryptionKeySecret + - !Ref TokenEncryptionKeySecret + - "" diff --git a/infra/aws/tests/test_sam_template_validate.py b/infra/aws/tests/test_sam_template_validate.py new file mode 100644 index 0000000..e78c224 --- /dev/null +++ b/infra/aws/tests/test_sam_template_validate.py @@ -0,0 +1,37 @@ +"""Structural SAM validation for templates next to this package (``sam validate``). + +Requires the AWS SAM CLI on PATH; skipped when missing. 
+""" + +from __future__ import annotations + +import shutil +import subprocess +from pathlib import Path + +import pytest + +INFRA_AWS = Path(__file__).resolve().parent.parent + + +def _which(name: str) -> str | None: + return shutil.which(name) + + +@pytest.mark.parametrize( + "name", + ["template.yaml", "template.bootstrap.yaml"], +) +def test_sam_template_validates(name: str) -> None: + """Same class of checks as ``sam build``, without packaging.""" + sam = _which("sam") + if not sam: + pytest.skip("sam CLI not on PATH") + template = INFRA_AWS / name + assert template.is_file(), f"missing {template}" + proc = subprocess.run( + [sam, "validate", "-t", str(template), "--lint"], + capture_output=True, + text=True, + ) + assert proc.returncode == 0, f"sam validate failed:\n{proc.stdout}\n{proc.stderr}" diff --git a/infra/gcp/README.md b/infra/gcp/README.md new file mode 100644 index 0000000..0d37b14 --- /dev/null +++ b/infra/gcp/README.md @@ -0,0 +1,82 @@ +# SyncBot on GCP (Terraform) + +Minimal Terraform scaffold to run SyncBot on Google Cloud. Satisfies the [infrastructure contract](../../docs/INFRA_CONTRACT.md): Cloud Run (public HTTPS), Secret Manager, optional Cloud SQL, and optional Cloud Scheduler keep-warm. + +## Prerequisites + +- [Terraform](https://www.terraform.io/downloads) >= 1.0 +- [gcloud](https://cloud.google.com/sdk/docs/install) CLI, authenticated +- A GCP project with billing enabled + +## Quick start + +1. **Enable APIs and create secrets (one-time)** + Terraform will enable required APIs. Create Secret Manager secrets and set their values (or let Terraform create placeholder secrets and add versions manually): + + ```bash + cd infra/gcp + terraform init + terraform plan -var="project_id=YOUR_PROJECT_ID" -var="stage=test" + terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" + ``` + +2. **Set secret values** + After the first apply, add secret versions for Slack and DB (if using existing DB). 
Use the secret IDs shown in Terraform (e.g. `syncbot-test-syncbot-slack-signing-secret`):
+
+   ```bash
+   echo -n "YOUR_SLACK_SIGNING_SECRET" | gcloud secrets versions add syncbot-test-syncbot-slack-signing-secret --data-file=-
+   # Repeat for SLACK_CLIENT_ID, SLACK_CLIENT_SECRET, SLACK_BOT_SCOPES (comma-separated list must match oauth_config.scopes.bot / BOT_SCOPES), syncbot-db-password (if existing DB)
+   ```
+
+   `TOKEN_ENCRYPTION_KEY` is generated once automatically by Terraform and stored in Secret Manager. Back it up. If lost, existing workspaces must reinstall to re-authorize bot tokens.
+   For disaster recovery, restore by passing the backed-up key value: `-var='token_encryption_key_override=YOUR_SAVED_KEY'` (an empty value is ignored and a fresh key is generated instead).
+
+3. **Set the Cloud Run image**
+   By default the service uses a placeholder image. Build and push your SyncBot image to Artifact Registry, then:
+
+   ```bash
+   terraform apply -var="project_id=YOUR_PROJECT_ID" -var="stage=test" \
+     -var='cloud_run_image=REGION-docker.pkg.dev/PROJECT/syncbot-test-images/syncbot:latest'
+   ```
+
+## Variables (summary)
+
+| Variable | Description |
+|----------|-------------|
+| `project_id` | GCP project ID (required) |
+| `region` | Region for Cloud Run and optional Cloud SQL (default `us-central1`) |
+| `stage` | Stage name, e.g. `test` or `prod` |
+| `use_existing_database` | If `true`, use `existing_db_*` vars instead of creating Cloud SQL |
+| `existing_db_host`, `existing_db_schema`, `existing_db_user` | Existing MySQL connection (when `use_existing_database = true`) |
+| `cloud_run_image` | Container image URL for Cloud Run (set after first build) |
+| `secret_slack_bot_scopes` | Secret Manager secret ID for **bot** OAuth scopes (runtime `SLACK_BOT_SCOPES`; default `syncbot-slack-scopes`). The **secret value** must match `oauth_config.scopes.bot` / `BOT_SCOPES` (same requirement as AWS SAM `SlackOauthBotScopes`). |
+| `slack_user_scopes` | Plain-text **user** OAuth scopes for Cloud Run (`SLACK_USER_SCOPES`). Default matches repo standard (same comma-separated string as AWS SAM `SlackOauthUserScopes`); must match manifest `oauth_config.scopes.user` and `USER_SCOPES` in `slack_manifest_scopes.py`. |
+| `log_level` | Python logging level for the app (`LOG_LEVEL`): `DEBUG`, `INFO`, `WARNING`, `ERROR`, or `CRITICAL` (default `INFO`). |
+| `enable_keep_warm` | Create Cloud Scheduler job to ping the service (default `true`) |
+
+See [variables.tf](variables.tf) for all options.
+
+## Outputs (deploy contract)
+
+After `terraform apply`, outputs align with [docs/INFRA_CONTRACT.md](../../docs/INFRA_CONTRACT.md):
+
+- **service_url** — Public base URL (for Slack app configuration)
+- **region** — Primary region
+- **project_id** — GCP project ID
+- **artifact_registry_repository** — Image registry URL (CI pushes here)
+- **deploy_service_account_email** — Service account for CI (use with Workload Identity Federation)
+
+Use the [GCP bootstrap output script](scripts/print-bootstrap-outputs.sh) to print these as GitHub variable suggestions.
+
+## Keep-warm
+
+If `enable_keep_warm` is `true`, a Cloud Scheduler job pings the service at `/health` on the configured interval. The app implements `GET /health` (JSON `{"status":"ok"}`).
+
+## HTTP port
+
+Cloud Run sets the `PORT` environment variable (default `8080`). The container entrypoint (`python app.py`) listens on `PORT`, falling back to `3000` when unset (local Docker).
+
+## Security
+
+- The Cloud Run service is publicly invokable so Slack can reach it. For production, consider Cloud Armor or IAP.
+- Deploy uses a dedicated service account; prefer [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation) for GitHub Actions instead of long-lived keys.
diff --git a/infra/gcp/main.tf b/infra/gcp/main.tf new file mode 100644 index 0000000..8a34047 --- /dev/null +++ b/infra/gcp/main.tf @@ -0,0 +1,350 @@ +# SyncBot on GCP — minimal Terraform scaffold +# Satisfies docs/INFRA_CONTRACT.md (Cloud Run, secrets, optional Cloud SQL, keep-warm) + +terraform { + required_version = ">= 1.0" + required_providers { + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + random = { + source = "hashicorp/random" + version = "~> 3.0" + } + } +} + +provider "google" { + project = var.project_id + region = var.region +} + +locals { + name_prefix = "syncbot-${var.stage}" + secret_ids = [ + var.secret_slack_signing_secret, + var.secret_slack_client_id, + var.secret_slack_client_secret, + var.secret_slack_bot_scopes, + var.secret_token_encryption_key, + var.secret_db_password, + ] + # Map deploy-contract env var names to Secret Manager secret variable keys (used in app_secrets) + env_to_secret_key = { + "SLACK_SIGNING_SECRET" = var.secret_slack_signing_secret + "SLACK_CLIENT_ID" = var.secret_slack_client_id + "SLACK_CLIENT_SECRET" = var.secret_slack_client_secret + "SLACK_BOT_SCOPES" = var.secret_slack_bot_scopes + "TOKEN_ENCRYPTION_KEY" = var.secret_token_encryption_key + "DATABASE_PASSWORD" = var.secret_db_password + } + # Runtime DB connection: existing host or Cloud SQL public IP after create + db_host = var.use_existing_database ? var.existing_db_host : ( + length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].public_ip_address : "" + ) + db_schema = var.use_existing_database ? var.existing_db_schema : "syncbot" + db_user = var.use_existing_database ? var.existing_db_user : "syncbot_app" + + # Non-secret Cloud Run env (see docs/INFRA_CONTRACT.md) + syncbot_public_url_effective = trimspace(var.syncbot_public_url_override) != "" ? 
trimspace(var.syncbot_public_url_override) : "" + runtime_plain_env = merge( + { + DATABASE_HOST = local.db_host + DATABASE_USER = local.db_user + DATABASE_SCHEMA = local.db_schema + DATABASE_BACKEND = var.database_backend + DATABASE_PORT = var.database_port + SLACK_USER_SCOPES = var.slack_user_scopes + LOG_LEVEL = var.log_level + REQUIRE_ADMIN = var.require_admin + SLACK_BOT_TOKEN = "123" + SOFT_DELETE_RETENTION_DAYS = tostring(var.soft_delete_retention_days) + SYNCBOT_FEDERATION_ENABLED = var.syncbot_federation_enabled ? "true" : "false" + }, + var.syncbot_instance_id != "" ? { SYNCBOT_INSTANCE_ID = var.syncbot_instance_id } : {}, + local.syncbot_public_url_effective != "" ? { SYNCBOT_PUBLIC_URL = trimsuffix(local.syncbot_public_url_effective, "/") } : {}, + trimspace(var.primary_workspace) != "" ? { PRIMARY_WORKSPACE = var.primary_workspace } : {}, + trimspace(var.enable_db_reset) != "" ? { ENABLE_DB_RESET = var.enable_db_reset } : {}, + var.database_tls_enabled != "" ? { DATABASE_TLS_ENABLED = var.database_tls_enabled } : {}, + trimspace(var.database_ssl_ca_path) != "" ? { DATABASE_SSL_CA_PATH = var.database_ssl_ca_path } : {}, + ) +} + +# --------------------------------------------------------------------------- +# APIs +# --------------------------------------------------------------------------- + +resource "google_project_service" "run" { + project = var.project_id + service = "run.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "secretmanager" { + project = var.project_id + service = "secretmanager.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "sqladmin" { + count = var.use_existing_database ? 0 : 1 + project = var.project_id + service = "sqladmin.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "scheduler" { + count = var.enable_keep_warm ? 
1 : 0
+  project            = var.project_id
+  service            = "cloudscheduler.googleapis.com"
+  disable_on_destroy = false
+}
+
+resource "google_project_service" "artifact_registry" {
+  project            = var.project_id
+  service            = "artifactregistry.googleapis.com"
+  disable_on_destroy = false
+}
+
+# ---------------------------------------------------------------------------
+# Secret Manager: placeholder secrets (values set via gcloud or console)
+# ---------------------------------------------------------------------------
+
+resource "google_secret_manager_secret" "app_secrets" {
+  for_each  = toset(local.secret_ids)
+  project   = var.project_id
+  secret_id = "${local.name_prefix}-${each.key}"
+
+  replication {
+    auto {}
+  }
+
+  depends_on = [google_project_service.secretmanager]
+}
+
+# ---------------------------------------------------------------------------
+# Artifact Registry repository for container images (deploy contract: artifact_bucket equivalent)
+# ---------------------------------------------------------------------------
+
+resource "google_artifact_registry_repository" "syncbot" {
+  location      = var.region
+  repository_id = "${local.name_prefix}-images"
+  description   = "SyncBot container images"
+  format        = "DOCKER"
+
+  depends_on = [google_project_service.artifact_registry]
+}
+
+# ---------------------------------------------------------------------------
+# Service account for Cloud Run (runtime)
+# ---------------------------------------------------------------------------
+
+resource "google_service_account" "cloud_run" {
+  project      = var.project_id
+  account_id   = "${replace(local.name_prefix, "-", "")}-run"
+  display_name = "SyncBot Cloud Run runtime (${var.stage})"
+}
+
+# Grant the Cloud Run SA read access to each app secret, scoped per secret
+# (least privilege). A project-level google_project_iam_member with for_each
+# over secret_ids would create N resources that all manage the SAME
+# (project, role, member) binding: destroying any one instance revokes the
+# access the others still believe exists, and it would grant accessor on
+# every secret in the project rather than just SyncBot's own secrets.
+resource "google_secret_manager_secret_iam_member" "cloud_run_secret_access" {
+  for_each  = google_secret_manager_secret.app_secrets
+  project   = var.project_id
+  secret_id = each.value.secret_id
+  role      = "roles/secretmanager.secretAccessor"
+  member    = "serviceAccount:${google_service_account.cloud_run.email}"
+}
+
+# --------------------------------------------------------------------------- +# Deploy service account (CI / Workload Identity Federation) +# --------------------------------------------------------------------------- + +resource "google_service_account" "deploy" { + project = var.project_id + account_id = "${replace(local.name_prefix, "-", "")}-deploy" + display_name = "SyncBot deploy (CI) (${var.stage})" +} + +resource "google_project_iam_member" "deploy_run_admin" { + project = var.project_id + role = "roles/run.admin" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +resource "google_project_iam_member" "deploy_sa_user" { + project = var.project_id + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +resource "google_project_iam_member" "deploy_artifact_writer" { + project = var.project_id + role = "roles/artifactregistry.writer" + member = "serviceAccount:${google_service_account.deploy.email}" +} + +# --------------------------------------------------------------------------- +# Cloud SQL (optional): minimal MySQL instance +# --------------------------------------------------------------------------- + +resource "random_password" "db" { + count = var.use_existing_database ? 0 : 1 + length = 24 + special = false +} + +resource "random_password" "token_encryption_key" { + length = 48 + special = false +} + +resource "google_sql_database_instance" "main" { + count = var.use_existing_database ? 
0 : 1 + project = var.project_id + name = "${local.name_prefix}-db" + database_version = "MYSQL_8_0" + region = var.region + + settings { + tier = "db-f1-micro" + availability_type = "ZONAL" + disk_size = 10 + disk_type = "PD_SSD" + + database_flags { + name = "cloudsql_iam_authentication" + value = "on" + } + + ip_configuration { + ipv4_enabled = true + private_network = null + } + } + + deletion_protection = false + + depends_on = [google_project_service.sqladmin] +} + +resource "google_sql_database" "schema" { + count = var.use_existing_database ? 0 : 1 + name = "syncbot" + instance = google_sql_database_instance.main[0].name +} + +resource "google_sql_user" "app" { + count = var.use_existing_database ? 0 : 1 + name = "syncbot_app" + instance = google_sql_database_instance.main[0].name + host = "%" + password = random_password.db[0].result +} + +# Store Cloud SQL password in Secret Manager for Cloud Run +resource "google_secret_manager_secret_version" "db_password" { + count = var.use_existing_database ? 0 : 1 + secret = google_secret_manager_secret.app_secrets[var.secret_db_password].id + secret_data = random_password.db[0].result +} + +# Generate TOKEN_ENCRYPTION_KEY once and persist in Secret Manager. +resource "google_secret_manager_secret_version" "token_encryption_key" { + secret = google_secret_manager_secret.app_secrets[var.secret_token_encryption_key].id + secret_data = var.token_encryption_key_override != "" ? 
var.token_encryption_key_override : random_password.token_encryption_key.result +} + +# --------------------------------------------------------------------------- +# Cloud Run service +# --------------------------------------------------------------------------- + +resource "google_cloud_run_v2_service" "syncbot" { + project = var.project_id + name = local.name_prefix + location = var.region + ingress = "INGRESS_TRAFFIC_ALL" + + template { + service_account = google_service_account.cloud_run.email + + # Lambda-like single request per container (free-tier friendly; matches app pool sizing). + max_instance_request_concurrency = 1 + + scaling { + min_instance_count = var.cloud_run_min_instances + max_instance_count = var.cloud_run_max_instances + } + + containers { + image = var.cloud_run_image + + resources { + limits = { + cpu = var.cloud_run_cpu + memory = var.cloud_run_memory + } + } + + dynamic "env" { + for_each = local.runtime_plain_env + content { + name = env.key + value = env.value + } + } + + dynamic "env" { + for_each = local.env_to_secret_key + content { + name = env.key + value_source { + secret_key_ref { + secret = google_secret_manager_secret.app_secrets[env.value].name + version = "latest" + } + } + } + } + } + } + + depends_on = [ + google_project_service.run, + google_secret_manager_secret.app_secrets, + ] +} + +# Allow unauthenticated invocations (Slack calls the URL; use IAP or Cloud Armor in prod if needed) +resource "google_cloud_run_v2_service_iam_member" "public" { + project = google_cloud_run_v2_service.syncbot.project + location = google_cloud_run_v2_service.syncbot.location + name = google_cloud_run_v2_service.syncbot.name + role = "roles/run.invoker" + member = "allUsers" +} + +# --------------------------------------------------------------------------- +# Cloud Scheduler (keep-warm) +# --------------------------------------------------------------------------- + +resource "google_cloud_scheduler_job" "keep_warm" { + count = 
var.enable_keep_warm ? 1 : 0 + project = var.project_id + name = "${local.name_prefix}-keep-warm" + region = var.region + schedule = "*/${var.keep_warm_interval_minutes} * * * *" + time_zone = "UTC" + attempt_deadline = "60s" + + http_target { + uri = "${google_cloud_run_v2_service.syncbot.uri}/health" + http_method = "GET" + oidc_token { + service_account_email = google_service_account.cloud_run.email + } + } + + depends_on = [ + google_project_service.scheduler, + google_cloud_run_v2_service.syncbot, + ] +} diff --git a/infra/gcp/outputs.tf b/infra/gcp/outputs.tf new file mode 100644 index 0000000..cb623cd --- /dev/null +++ b/infra/gcp/outputs.tf @@ -0,0 +1,49 @@ +# Outputs aligned with docs/INFRA_CONTRACT.md (bootstrap output contract) + +output "service_url" { + description = "Public base URL of the deployed app (for Slack app configuration)" + value = google_cloud_run_v2_service.syncbot.uri +} + +output "region" { + description = "Primary region for the deployment" + value = var.region +} + +output "project_id" { + description = "GCP project ID" + value = var.project_id +} + +# Deploy contract: artifact_bucket equivalent (registry for container images) +output "artifact_registry_repository" { + description = "Artifact Registry repository for container images (CI pushes here)" + value = "${var.region}-docker.pkg.dev/${var.project_id}/${google_artifact_registry_repository.syncbot.repository_id}" +} + +# Deploy contract: deploy_role equivalent (for Workload Identity Federation) +output "deploy_service_account_email" { + description = "Service account email for CI/deploy (use with WIF)" + value = google_service_account.deploy.email +} + +output "cloud_run_service_name" { + description = "Cloud Run service name (for deploy targeting)" + value = google_cloud_run_v2_service.syncbot.name +} + +output "cloud_run_service_location" { + description = "Cloud Run service location (region)" + value = google_cloud_run_v2_service.syncbot.location +} + +# Optional: DB 
connection info when Cloud SQL is created +output "database_connection_name" { + description = "Cloud SQL connection name (when not using existing DB)" + value = var.use_existing_database ? null : (length(google_sql_database_instance.main) > 0 ? google_sql_database_instance.main[0].connection_name : null) +} + +output "token_encryption_secret_name" { + description = "Secret Manager secret name containing TOKEN_ENCRYPTION_KEY" + value = google_secret_manager_secret.app_secrets[var.secret_token_encryption_key].name +} diff --git a/infra/gcp/scripts/deploy.sh b/infra/gcp/scripts/deploy.sh new file mode 100755 index 0000000..90af4ec --- /dev/null +++ b/infra/gcp/scripts/deploy.sh @@ -0,0 +1,860 @@ +#!/usr/bin/env bash +# Interactive GCP deploy helper (Terraform). Run from repo root: +# ./infra/gcp/scripts/deploy.sh +# Or via: ./deploy.sh gcp +# +# Phases (main path): +# 1) Prerequisites (terraform, gcloud, python3, curl) +# 2) Project, region, stage; detect existing Cloud Run service +# 3) Deploy Tasks: multi-select menu (build/deploy, CI/CD, Slack API, backup secrets) +# 4) Configuration (if build/deploy): database, image, log level, terraform init/plan/apply +# 5) Post-tasks: Slack manifest/API, deploy receipt, print-bootstrap-outputs, GitHub Actions, DR secrets +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GCP_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +REPO_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" +SLACK_MANIFEST_GENERATED_PATH="" + +# shellcheck source=/dev/null +source "$REPO_ROOT/deploy.sh" + +echo "=== Prerequisites ===" +prereqs_require_cmd terraform prereqs_hint_terraform +prereqs_require_cmd gcloud prereqs_hint_gcloud +prereqs_require_cmd python3 prereqs_hint_python3 +prereqs_require_cmd curl prereqs_hint_curl + +prereqs_print_cli_status_matrix "GCP" terraform gcloud python3 curl + +prompt_line() { + local p="$1" + local d="${2:-}" + local v + if [[ -n "$d" ]]; then + read -r -p "$p [$d]: " v + echo "${v:-$d}" + else + read -r -p "$p: " v + echo "$v" + fi +} + +prompt_secret() { + local p="$1" + local v + read -r -s -p "$p: " v + printf '\n' >&2 + echo "$v" +} + +prompt_required() { + local p="$1" + local v + while true; do + read -r -p "$p: " v + if [[ -n "$v" ]]; then + echo "$v" + return 0 + fi + echo "Error: $p is required." >&2 + done +} + +required_from_env_or_prompt() { + local env_name="$1" + local prompt="$2" + local mode="${3:-plain}" # plain|secret + local env_value="${!env_name:-}" + if [[ -n "$env_value" ]]; then + echo "Using $prompt from environment variable $env_name." >&2 + echo "$env_value" + return 0 + fi + if [[ "$mode" == "secret" ]]; then + while true; do + env_value="$(prompt_secret "$prompt")" + if [[ -n "$env_value" ]]; then + echo "$env_value" + return 0 + fi + echo "Error: $prompt is required." >&2 + done + fi + prompt_required "$prompt" +} + +prompt_yn() { + local p="$1" + local def="${2:-y}" + local a + local hint="y/N" + [[ "$def" == "y" ]] && hint="Y/n" + read -r -p "$p [$hint]: " a + if [[ -z "$a" ]]; then + a="$def" + fi + [[ "$a" =~ ^[Yy]$ ]] +} + +ensure_gcloud_authenticated() { + local active_account + active_account="$(gcloud auth list --filter=status:ACTIVE --format='value(account)' 2>/dev/null || true)" + if [[ -n "$active_account" ]]; then + return 0 + fi + echo "gcloud is not authenticated." + if prompt_yn "Run 'gcloud auth login' now?" 
"y"; then + gcloud auth login || true + fi + active_account="$(gcloud auth list --filter=status:ACTIVE --format='value(account)' 2>/dev/null || true)" + if [[ -z "$active_account" ]]; then + echo "Unable to authenticate gcloud. Run 'gcloud auth login' and rerun." + exit 1 + fi +} + +ensure_gcloud_adc_authenticated() { + if gcloud auth application-default print-access-token >/dev/null 2>&1; then + return 0 + fi + + echo "Application Default Credentials (ADC) are not configured." + if prompt_yn "Run 'gcloud auth application-default login' now?" "y"; then + gcloud auth application-default login || true + fi + + if ! gcloud auth application-default print-access-token >/dev/null 2>&1; then + echo "Unable to configure ADC. Run 'gcloud auth application-default login' and rerun." >&2 + exit 1 + fi +} + +ensure_gh_authenticated() { + if ! command -v gh >/dev/null 2>&1; then + prereqs_hint_gh_cli >&2 + return 1 + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh CLI is not authenticated." + if prompt_yn "Run 'gh auth login' now?" "y"; then + gh auth login || true + fi + if gh auth status >/dev/null 2>&1; then + return 0 + fi + echo "gh authentication is still missing. Skipping automatic GitHub setup." 
+ return 1 +} + +cloud_sql_instance_exists() { + local project_id="$1" + local instance_name="$2" + gcloud sql instances describe "$instance_name" \ + --project "$project_id" \ + --format='value(name)' >/dev/null 2>&1 +} + +cloud_run_env_value() { + local project_id="$1" + local region="$2" + local service_name="$3" + local env_key="$4" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format=json 2>/dev/null | python3 - "$env_key" <<'PY' +import json +import sys + +env_key = sys.argv[1] +try: + data = json.load(sys.stdin) +except Exception: + print("") + raise SystemExit(0) + +containers = (data.get("spec", {}) or {}).get("template", {}).get("spec", {}).get("containers", []) +for c in containers: + for e in c.get("env", []) or []: + if e.get("name") == env_key: + print(e.get("value", "")) + raise SystemExit(0) +print("") +PY +} + +cloud_run_image_value() { + local project_id="$1" + local region="$2" + local service_name="$3" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format='value(spec.template.spec.containers[0].image)' 2>/dev/null || true +} + +secret_has_active_version() { + local project_id="$1" + local secret_name="$2" + local latest_state + latest_state="$(gcloud secrets versions describe latest \ + --project "$project_id" \ + --secret "$secret_name" \ + --format='value(state)' 2>/dev/null || true)" + [[ "$latest_state" == "ENABLED" ]] +} + +secret_latest_value() { + local project_id="$1" + local secret_name="$2" + gcloud secrets versions access latest \ + --project "$project_id" \ + --secret "$secret_name" 2>/dev/null || true +} + +cloud_run_secret_name() { + local project_id="$1" + local region="$2" + local service_name="$3" + local env_key="$4" + gcloud run services describe "$service_name" \ + --project "$project_id" \ + --region "$region" \ + --format=json 2>/dev/null | python3 - "$env_key" <<'PY' +import json +import sys + +env_key = 
sys.argv[1] +try: + data = json.load(sys.stdin) +except Exception: + print("") + raise SystemExit(0) + +containers = (data.get("spec", {}) or {}).get("template", {}).get("spec", {}).get("containers", []) +for c in containers: + for e in c.get("env", []) or []: + if e.get("name") != env_key: + continue + secret_ref = (((e.get("valueSource") or {}).get("secretKeyRef") or {}).get("secret")) or "" + if not secret_ref: + print("") + raise SystemExit(0) + # Accept either full resource names or plain secret IDs. + print(secret_ref.split("/secrets/")[-1]) + raise SystemExit(0) +print("") +PY +} + +preflight_existing_db_secret_readiness() { + local project_id="$1" + local stage="$2" + local db_secret_name="syncbot-${stage}-syncbot-db-password" + + echo + echo "=== Existing DB Secret Preflight ===" + echo "Verifying required Secret Manager value exists for DATABASE_PASSWORD..." + if ! secret_has_active_version "$project_id" "$db_secret_name"; then + echo "Missing active secret version for '$db_secret_name'." 
>&2 + echo "Create one before deploy, for example:" >&2 + echo " printf '%s' '<db-password>' | gcloud secrets versions add '$db_secret_name' --project '$project_id' --data-file=-" >&2 + exit 1 + fi + echo "Secret preflight passed for: $db_secret_name" +} + +slack_manifest_json_compact() { + local manifest_file="$1" + python3 - "$manifest_file" <<'PY' +import json +import sys +path = sys.argv[1] +with open(path, "r", encoding="utf-8") as f: + data = json.load(f) +print(json.dumps(data, separators=(",", ":"))) +PY +} + +slack_api_configure_from_manifest() { + local manifest_file="$1" + local install_url="$2" + local token app_id team_id manifest_json api_resp ok + + echo + echo "=== Slack App API ===" + + token="$(required_from_env_or_prompt "SLACK_API_TOKEN" "Slack API token (required scopes: apps.manifest:write)" "secret")" + app_id="$(prompt_line "Slack App ID (optional; blank = create new app)" "${SLACK_APP_ID:-}")" + team_id="$(prompt_line "Slack Team ID (optional; usually blank)" "${SLACK_TEAM_ID:-}")" + + manifest_json="$(slack_manifest_json_compact "$manifest_file" 2>/dev/null || true)" + if [[ -z "$manifest_json" ]]; then + echo "Could not parse manifest JSON automatically." + echo "Ensure $manifest_file is valid JSON and Python 3 is installed." 
+ return 0 + fi + + if [[ -n "$app_id" ]]; then + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "app_id=$app_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.update" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +print("ok" if data.get("ok") else f"error:{data.get('error','unknown_error')}") +PY +)" + if [[ "$ok" == "ok" ]]; then + echo "Slack app manifest updated for App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API update failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.update" + fi + return 0 + fi + + if [[ -n "$team_id" ]]; then + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "team_id=$team_id" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + else + api_resp="$(curl -sS -X POST \ + -H "Authorization: Bearer $token" \ + --data-urlencode "manifest=$manifest_json" \ + "https://slack.com/api/apps.manifest.create" || true)" + fi + ok="$(python3 - "$api_resp" <<'PY' +import json,sys +try: + data=json.loads(sys.argv[1]) +except Exception: + print("invalid-json") + sys.exit(0) +if not data.get("ok"): + print(f"error:{data.get('error','unknown_error')}") + sys.exit(0) +app_id = data.get("app_id") or (data.get("app", {}) or {}).get("id") or "" +print(f"ok:{app_id}") +PY +)" + if [[ "$ok" == ok:* ]]; 
then + app_id="${ok#ok:}" + echo "Slack app created successfully." + [[ -n "$app_id" ]] && echo "New Slack App ID: $app_id" + echo "Open install URL: $install_url" + else + echo "Slack API create failed: ${ok#error:}" + echo "Response (truncated):" + slack_api_echo_truncated_body "$api_resp" + echo "Hint: check token scopes (apps.manifest:write), manifest JSON, and api.slack.com methods apps.manifest.create" + fi +} + +generate_stage_slack_manifest() { + local stage="$1" + local api_url="$2" + local install_url="$3" + local template="$REPO_ROOT/slack-manifest.json" + local manifest_out="$REPO_ROOT/slack-manifest_${stage}.json" + local events_url base_url oauth_redirect_url + + if [[ ! -f "$template" ]]; then + echo "Slack manifest template not found at $template" + return 0 + fi + if [[ -z "$api_url" ]]; then + echo "Could not determine API URL from service outputs. Skipping Slack manifest generation." + return 0 + fi + + events_url="${api_url%/}" + base_url="${events_url%/slack/events}" + oauth_redirect_url="${base_url}/slack/oauth_redirect" + + if ! python3 - "$template" "$manifest_out" "$events_url" "$oauth_redirect_url" <<'PY' +import json +import sys + +template_path, out_path, events_url, redirect_url = sys.argv[1:5] +with open(template_path, "r", encoding="utf-8") as f: + manifest = json.load(f) + +manifest.setdefault("oauth_config", {}).setdefault("redirect_urls", []) +manifest["oauth_config"]["redirect_urls"] = [redirect_url] +manifest.setdefault("settings", {}).setdefault("event_subscriptions", {}) +manifest["settings"]["event_subscriptions"]["request_url"] = events_url +manifest.setdefault("settings", {}).setdefault("interactivity", {}) +manifest["settings"]["interactivity"]["request_url"] = events_url + +with open(out_path, "w", encoding="utf-8") as f: + json.dump(manifest, f, indent=2) + f.write("\n") +PY + then + echo "Failed to generate stage Slack manifest from JSON template." 
+ return 0 + fi + + SLACK_MANIFEST_GENERATED_PATH="$manifest_out" + + echo "=== Slack Manifest (${stage}) ===" + echo "Saved file: $manifest_out" + echo "Install URL: $install_url" + echo + sed 's/^/ /' "$manifest_out" +} + +write_deploy_receipt() { + local provider="$1" + local stage="$2" + local project_or_stack="$3" + local region="$4" + local service_url="$5" + local install_url="$6" + local manifest_path="$7" + local ts_human ts_file receipt_dir receipt_path + + ts_human="$(date -u +"%Y-%m-%d %H:%M:%S UTC")" + ts_file="$(date -u +"%Y%m%dT%H%M%SZ")" + receipt_dir="$REPO_ROOT/deploy-receipts" + receipt_path="$receipt_dir/deploy-${provider}-${stage}-${ts_file}.md" + + mkdir -p "$receipt_dir" + cat >"$receipt_path" </dev/null || true)" + artifact_registry_url="$(cd "$terraform_dir" && terraform output -raw artifact_registry_repository 2>/dev/null || true)" + service_url="$(cd "$terraform_dir" && terraform output -raw service_url 2>/dev/null || true)" + + echo + echo "=== GitHub Actions (GCP) ===" + echo "Detected project: $gcp_project_id" + echo "Detected region: $gcp_region" + echo "Detected service account: $deploy_sa_email" + echo "Detected artifact repo: $artifact_registry_url" + echo "Detected service URL: $service_url" + repo="$(prompt_github_repo_for_actions "$REPO_ROOT")" + + if ! ensure_gh_authenticated; then + echo + echo "Set these GitHub Actions Variables manually:" + echo " GCP_PROJECT_ID = $gcp_project_id" + echo " GCP_REGION = $gcp_region" + echo " GCP_SERVICE_ACCOUNT = $deploy_sa_email" + echo " DEPLOY_TARGET = gcp" + echo "Also set GCP_WORKLOAD_IDENTITY_PROVIDER for deploy-gcp.yml." + return 0 + fi + + if prompt_yn "Create/update GitHub environments 'test' and 'prod' now?" "y"; then + gh api -X PUT "repos/$repo/environments/test" >/dev/null + gh api -X PUT "repos/$repo/environments/prod" >/dev/null + echo "GitHub environments ensured: test, prod." 
+ fi + + if prompt_yn "Set repo variables with gh now (GCP_PROJECT_ID, GCP_REGION, GCP_SERVICE_ACCOUNT, DEPLOY_TARGET=gcp)?" "y"; then + gh variable set GCP_PROJECT_ID --body "$gcp_project_id" -R "$repo" + gh variable set GCP_REGION --body "$gcp_region" -R "$repo" + [[ -n "$deploy_sa_email" ]] && gh variable set GCP_SERVICE_ACCOUNT --body "$deploy_sa_email" -R "$repo" + gh variable set DEPLOY_TARGET --body "gcp" -R "$repo" + echo "GitHub repository variables updated." + echo "Remember to set GCP_WORKLOAD_IDENTITY_PROVIDER." + fi + + if prompt_yn "Set environment variable STAGE_NAME for '$env_name' now?" "y"; then + gh variable set STAGE_NAME --env "$env_name" --body "$deploy_stage" -R "$repo" + echo "Environment variable STAGE_NAME updated for '$env_name'." + fi +} + +echo "=== SyncBot GCP Deploy ===" +echo "Working directory: $GCP_DIR" +echo + +echo "=== Project And Region ===" +PROJECT_ID="$(prompt_line "GCP project_id" "${GCP_PROJECT_ID:-}")" +if [[ -z "$PROJECT_ID" ]]; then + echo "Error: project_id is required." >&2 + exit 1 +fi + +REGION="$(prompt_line "GCP region" "${GCP_REGION:-us-central1}")" +echo +echo "=== Authentication ===" +ensure_gcloud_authenticated +ensure_gcloud_adc_authenticated +gcloud config set project "$PROJECT_ID" >/dev/null 2>&1 || true +STAGE="$(prompt_line "Stage (test/prod)" "${STAGE:-test}")" +if [[ "$STAGE" != "test" && "$STAGE" != "prod" ]]; then + echo "Error: stage must be 'test' or 'prod'." >&2 + exit 1 +fi +SERVICE_NAME="syncbot-${STAGE}" +EXISTING_SERVICE_URL="$(gcloud run services describe "$SERVICE_NAME" \ + --project "$PROJECT_ID" \ + --region "$REGION" \ + --format='value(status.url)' 2>/dev/null || true)" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + echo "Detected existing Cloud Run service: $SERVICE_NAME" + if ! prompt_yn "Continue and update this existing deployment?" "y"; then + echo "Aborted." 
+ exit 0 + fi +fi + +echo +prompt_deploy_tasks_gcp + +if [[ "$TASK_BUILD_DEPLOY" != "true" ]]; then + if [[ "$TASK_CICD" == "true" || "$TASK_SLACK_API" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + cd "$GCP_DIR" + if ! terraform output -raw service_url &>/dev/null; then + echo "Error: No Terraform outputs found in $GCP_DIR. Select task 1 (Build/Deploy) first." >&2 + exit 1 + fi + fi +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then +echo +echo "=== Configuration ===" +echo "=== Database Source ===" +# USE_EXISTING=true: point Terraform at an external DB only (use_existing_database); skip creating Cloud SQL. +# USE_EXISTING_DEFAULT: y/n default for the prompt when redeploying without a managed instance for this stage. +USE_EXISTING="false" +USE_EXISTING_DEFAULT="n" +DB_INSTANCE_NAME="${SERVICE_NAME}-db" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + if cloud_sql_instance_exists "$PROJECT_ID" "$DB_INSTANCE_NAME"; then + USE_EXISTING_DEFAULT="n" + echo "Detected managed Cloud SQL instance: $DB_INSTANCE_NAME" + else + USE_EXISTING_DEFAULT="y" + echo "No managed Cloud SQL instance found for stage; defaulting to existing DB mode." + fi +fi +if prompt_yn "Use existing database host (skip Cloud SQL creation)?" 
"$USE_EXISTING_DEFAULT"; then + USE_EXISTING="true" +fi + +EXISTING_HOST="" +EXISTING_SCHEMA="" +EXISTING_USER="" +DETECTED_EXISTING_HOST="" +DETECTED_EXISTING_SCHEMA="" +DETECTED_EXISTING_USER="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_EXISTING_HOST="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_HOST")" + DETECTED_EXISTING_SCHEMA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_SCHEMA")" + DETECTED_EXISTING_USER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_USER")" +fi +if [[ "$USE_EXISTING" == "true" ]]; then + EXISTING_HOST="$(prompt_line "Existing DB host" "$DETECTED_EXISTING_HOST")" + EXISTING_SCHEMA="$(prompt_line "Database schema name" "${DETECTED_EXISTING_SCHEMA:-syncbot}")" + EXISTING_USER="$(prompt_line "Database user" "$DETECTED_EXISTING_USER")" + if [[ -z "$EXISTING_HOST" ]]; then + echo "Error: Existing DB host is required when using existing database mode." >&2 + exit 1 + fi + if [[ -z "$EXISTING_USER" ]]; then + echo "Error: Database user is required when using existing database mode." >&2 + exit 1 + fi +fi + +DETECTED_CLOUD_IMAGE="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_CLOUD_IMAGE="$(cloud_run_image_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME")" +fi +echo +echo "=== Container Image ===" +CLOUD_IMAGE="$(prompt_line "cloud_run_image (required)" "$DETECTED_CLOUD_IMAGE")" +if [[ -z "$CLOUD_IMAGE" ]]; then + echo "Error: cloud_run_image is required. Build and push the SyncBot image first, then rerun." >&2 + exit 1 +fi + +DETECTED_LOG_LEVEL="" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_LOG_LEVEL="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "LOG_LEVEL")" +fi +LOG_LEVEL_DEFAULT="INFO" +if [[ -n "$DETECTED_LOG_LEVEL" ]]; then + LOG_LEVEL_DEFAULT="$(normalize_log_level "$DETECTED_LOG_LEVEL")" + if ! 
is_valid_log_level "$LOG_LEVEL_DEFAULT"; then + LOG_LEVEL_DEFAULT="INFO" + fi +fi + +echo +echo "=== Log Level ===" +LOG_LEVEL="$(prompt_log_level "$LOG_LEVEL_DEFAULT")" + +# Preserve optional runtime env on redeploy (Terraform defaults otherwise). +REQUIRE_ADMIN_DEFAULT="true" +SOFT_DELETE_DEFAULT="30" +SYNCBOT_PUBLIC_DEFAULT="" +SYNCBOT_FEDERATION_DEFAULT="false" +INSTANCE_ID_VAR="" +PRIMARY_WORKSPACE_VAR="" +ENABLE_DB_RESET_VAR="" +DB_TLS_VAR="" +DB_SSL_CA_VAR="" +DB_BACKEND="mysql" +DB_PORT="3306" +if [[ -n "$EXISTING_SERVICE_URL" ]]; then + DETECTED_RA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "REQUIRE_ADMIN")" + [[ -n "$DETECTED_RA" ]] && REQUIRE_ADMIN_DEFAULT="$DETECTED_RA" + DETECTED_SD="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SOFT_DELETE_RETENTION_DAYS")" + if [[ "$DETECTED_SD" =~ ^[0-9]+$ ]]; then + SOFT_DELETE_DEFAULT="$DETECTED_SD" + fi + SYNCBOT_PUBLIC_DEFAULT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_PUBLIC_URL")" + DETECTED_FED="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_FEDERATION_ENABLED")" + if [[ "$DETECTED_FED" == "true" ]]; then + SYNCBOT_FEDERATION_DEFAULT="true" + elif [[ "$DETECTED_FED" == "false" ]]; then + SYNCBOT_FEDERATION_DEFAULT="false" + fi + DETECTED_INSTANCE_ID="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "SYNCBOT_INSTANCE_ID")" + INSTANCE_ID_VAR="${DETECTED_INSTANCE_ID:-}" + DETECTED_PW="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "PRIMARY_WORKSPACE")" + PRIMARY_WORKSPACE_VAR="${DETECTED_PW:-}" + DETECTED_ER="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "ENABLE_DB_RESET")" + ENABLE_DB_RESET_VAR="${DETECTED_ER:-}" + DETECTED_DB_TLS="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_TLS_ENABLED")" + DB_TLS_VAR="${DETECTED_DB_TLS:-}" + DETECTED_DB_SSL_CA="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_SSL_CA_PATH")" + 
DB_SSL_CA_VAR="${DETECTED_DB_SSL_CA:-}" + DETECTED_DB_BACKEND="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_BACKEND")" + [[ -n "$DETECTED_DB_BACKEND" ]] && DB_BACKEND="$DETECTED_DB_BACKEND" + DETECTED_DB_PORT="$(cloud_run_env_value "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PORT")" + [[ -n "$DETECTED_DB_PORT" ]] && DB_PORT="$DETECTED_DB_PORT" +fi + +echo +echo "=== App Settings ===" +REQUIRE_ADMIN_DEFAULT="$(prompt_require_admin "$REQUIRE_ADMIN_DEFAULT")" +SOFT_DELETE_DEFAULT="$(prompt_soft_delete_retention_days "$SOFT_DELETE_DEFAULT")" +PRIMARY_WORKSPACE_VAR="$(prompt_primary_workspace "$PRIMARY_WORKSPACE_VAR")" +SYNCBOT_FEDERATION_DEFAULT="$(prompt_federation_enabled "$SYNCBOT_FEDERATION_DEFAULT")" +if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then + INSTANCE_ID_VAR="$(prompt_instance_id "$INSTANCE_ID_VAR")" + SYNCBOT_PUBLIC_DEFAULT="$(prompt_public_url "$SYNCBOT_PUBLIC_DEFAULT")" +fi + +echo +echo "=== Terraform Init ===" +echo "Running: terraform init" +cd "$GCP_DIR" +terraform init + +# TF_VAR_* avoids shell parsing issues when the URL contains & or other metacharacters. 
+export TF_VAR_syncbot_public_url_override="$SYNCBOT_PUBLIC_DEFAULT" + +VARS=( + "-var=project_id=$PROJECT_ID" + "-var=region=$REGION" + "-var=stage=$STAGE" + "-var=log_level=$LOG_LEVEL" + "-var=require_admin=$REQUIRE_ADMIN_DEFAULT" + "-var=soft_delete_retention_days=$SOFT_DELETE_DEFAULT" + "-var=syncbot_federation_enabled=$SYNCBOT_FEDERATION_DEFAULT" + "-var=syncbot_instance_id=${INSTANCE_ID_VAR:-}" + "-var=primary_workspace=${PRIMARY_WORKSPACE_VAR:-}" + "-var=enable_db_reset=${ENABLE_DB_RESET_VAR:-}" + "-var=database_tls_enabled=${DB_TLS_VAR:-}" + "-var=database_ssl_ca_path=${DB_SSL_CA_VAR:-}" + "-var=database_backend=${DB_BACKEND:-mysql}" + "-var=database_port=${DB_PORT:-3306}" +) + +if [[ "$USE_EXISTING" == "true" ]]; then + preflight_existing_db_secret_readiness "$PROJECT_ID" "$STAGE" + VARS+=("-var=use_existing_database=true") + VARS+=("-var=existing_db_host=$EXISTING_HOST") + VARS+=("-var=existing_db_schema=$EXISTING_SCHEMA") + VARS+=("-var=existing_db_user=$EXISTING_USER") +else + VARS+=("-var=use_existing_database=false") +fi + +VARS+=("-var=cloud_run_image=$CLOUD_IMAGE") + +echo +echo "Require admin: $REQUIRE_ADMIN_DEFAULT" +echo "Soft-delete days: $SOFT_DELETE_DEFAULT" +echo "Log level: $LOG_LEVEL" +if [[ -n "$PRIMARY_WORKSPACE_VAR" ]]; then + echo "Primary workspace: $PRIMARY_WORKSPACE_VAR" +else + echo "Primary workspace: (not set — backup/restore hidden)" +fi +if [[ "$ENABLE_DB_RESET_VAR" == "true" ]]; then + echo "DB reset: enabled" +else + echo "DB reset: (disabled)" +fi +if [[ "$SYNCBOT_FEDERATION_DEFAULT" == "true" ]]; then + echo "Federation: enabled" + [[ -n "$INSTANCE_ID_VAR" ]] && echo "Instance ID: $INSTANCE_ID_VAR" + [[ -n "$SYNCBOT_PUBLIC_DEFAULT" ]] && echo "Public URL: $SYNCBOT_PUBLIC_DEFAULT" +fi +echo +echo "=== Terraform Plan ===" +terraform plan "${VARS[@]}" + +echo +echo "=== Terraform Apply ===" +terraform apply -auto-approve "${VARS[@]}" + +echo +echo "=== Apply Complete ===" +SERVICE_URL="$(terraform output -raw service_url 
2>/dev/null || true)" + +else + echo + echo "Skipping Build/Deploy (task 1 not selected)." + cd "$GCP_DIR" + SERVICE_URL="$(terraform output -raw service_url 2>/dev/null || true)" +fi + +SYNCBOT_API_URL="" +SYNCBOT_INSTALL_URL="" +if [[ -n "$SERVICE_URL" ]]; then + SYNCBOT_API_URL="${SERVICE_URL%/}/slack/events" + SYNCBOT_INSTALL_URL="${SERVICE_URL%/}/slack/install" +fi + +echo +echo "=== Post-Deploy ===" +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo "Deploy complete." +fi + +if [[ "$TASK_SLACK_API" == "true" || "$TASK_BUILD_DEPLOY" == "true" ]]; then + generate_stage_slack_manifest "$STAGE" "$SYNCBOT_API_URL" "$SYNCBOT_INSTALL_URL" +fi + +if [[ "$TASK_SLACK_API" == "true" ]] && [[ -n "${SLACK_MANIFEST_GENERATED_PATH:-}" ]]; then + slack_api_configure_from_manifest "$SLACK_MANIFEST_GENERATED_PATH" "$SYNCBOT_INSTALL_URL" +fi + +if [[ "$TASK_BUILD_DEPLOY" == "true" ]]; then + echo + echo "=== Deploy Receipt ===" + write_deploy_receipt \ + "gcp" \ + "$STAGE" \ + "$PROJECT_ID" \ + "$REGION" \ + "$SERVICE_URL" \ + "$SYNCBOT_INSTALL_URL" \ + "$SLACK_MANIFEST_GENERATED_PATH" + + echo "Next:" + echo " 1) Set Secret Manager values for Slack (see infra/gcp/README.md)." + echo " 2) Build and push container image; update cloud_run_image and re-apply when image changes." 
+ echo " 3) Run: ./infra/gcp/scripts/print-bootstrap-outputs.sh" + bash "$SCRIPT_DIR/print-bootstrap-outputs.sh" || true +fi + +if [[ "$TASK_CICD" == "true" ]]; then + configure_github_actions_gcp "$PROJECT_ID" "$REGION" "$GCP_DIR" "$STAGE" +fi + +TOKEN_SECRET_NAME="" +DB_SECRET_NAME="" +TOKEN_SECRET_VALUE="" +DB_SECRET_VALUE="" +if [[ "$TASK_BUILD_DEPLOY" == "true" || "$TASK_BACKUP_SECRETS" == "true" ]]; then + cd "$GCP_DIR" + TOKEN_SECRET_NAME="$(terraform output -raw token_encryption_secret_name 2>/dev/null || true)" + TOKEN_SECRET_NAME="${TOKEN_SECRET_NAME##*/secrets/}" + DB_SECRET_NAME="$(cloud_run_secret_name "$PROJECT_ID" "$REGION" "$SERVICE_NAME" "DATABASE_PASSWORD")" + if [[ -n "$TOKEN_SECRET_NAME" ]]; then + TOKEN_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$TOKEN_SECRET_NAME")" + fi + if [[ -n "$DB_SECRET_NAME" ]]; then + DB_SECRET_VALUE="$(secret_latest_value "$PROJECT_ID" "$DB_SECRET_NAME")" + fi +fi + +if [[ "$TASK_BACKUP_SECRETS" == "true" ]]; then + echo + echo "=== Backup Secrets (Disaster Recovery) ===" + # IMPORTANT: When Backup Secrets is selected, print plaintext backup secrets here. + # Do not remove/redact this section; operators rely on it for DR copy-out. + echo "Copy these values now and store them in your secure disaster-recovery vault." 
 + if [[ -n "$TOKEN_SECRET_NAME" ]]; then + echo "- TOKEN_ENCRYPTION_KEY source: $TOKEN_SECRET_NAME" + else + echo "- TOKEN_ENCRYPTION_KEY source: (not found)" + fi + if [[ -n "$TOKEN_SECRET_VALUE" ]]; then + echo " TOKEN_ENCRYPTION_KEY: $TOKEN_SECRET_VALUE" + else + echo " TOKEN_ENCRYPTION_KEY: (not available)" + fi + if [[ -n "$DB_SECRET_NAME" ]]; then + echo "- DATABASE_PASSWORD source: $DB_SECRET_NAME" + else + echo "- DATABASE_PASSWORD source: (not found)" + fi + if [[ -n "$DB_SECRET_VALUE" ]]; then + echo " DATABASE_PASSWORD: $DB_SECRET_VALUE" + else + echo " DATABASE_PASSWORD: (not available)" + fi +fi diff --git a/infra/gcp/scripts/print-bootstrap-outputs.sh b/infra/gcp/scripts/print-bootstrap-outputs.sh new file mode 100755 index 0000000..6099532 --- /dev/null +++ b/infra/gcp/scripts/print-bootstrap-outputs.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Print SyncBot GCP Terraform outputs for GitHub variables (WIF, deploy). +# Run from repo root: infra/gcp/scripts/print-bootstrap-outputs.sh +# Requires: terraform in PATH; run from repo root so infra/gcp is available. +# +# Flow: terraform output (full) -> suggested variable names for CI. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GCP_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" + +if [[ ! -d "$GCP_DIR" ]] || [[ ! -f "$GCP_DIR/main.tf" ]]; then + echo "Error: infra/gcp not found (expected at $GCP_DIR). Run from repo root." >&2 + exit 1 +fi + +echo "=== Terraform Outputs (Infra/GCP) ===" +echo "" + +cd "$GCP_DIR" +if ! terraform output -json >/dev/null 2>&1; then + echo "Error: Terraform state not initialized or no outputs. Run 'terraform init' and 'terraform apply' in infra/gcp first." 
>&2 + exit 1 +fi + +terraform output + +echo "" +echo "=== Suggested GitHub Actions Variables ===" +echo "GCP_PROJECT_ID = $(terraform output -raw project_id 2>/dev/null || echo '')" +echo "GCP_REGION = $(terraform output -raw region 2>/dev/null || echo '')" +echo "GCP_SERVICE_ACCOUNT = $(terraform output -raw deploy_service_account_email 2>/dev/null || echo '')" +echo "Artifact Registry = $(terraform output -raw artifact_registry_repository 2>/dev/null || echo '')" +echo "Service URL = $(terraform output -raw service_url 2>/dev/null || echo '')" +echo "" +echo "For deploy-gcp.yml also set: GCP_WORKLOAD_IDENTITY_PROVIDER (after configuring WIF for GitHub)." +echo "" +echo "TOKEN_ENCRYPTION_KEY is generated by this Terraform apply and stored in Secret Manager." +echo "Back it up after apply (see docs/DEPLOYMENT.md). Secret: $(terraform output -raw token_encryption_secret_name 2>/dev/null || echo '')" diff --git a/infra/gcp/tests/test_terraform_validate.py b/infra/gcp/tests/test_terraform_validate.py new file mode 100644 index 0000000..515f01a --- /dev/null +++ b/infra/gcp/tests/test_terraform_validate.py @@ -0,0 +1,53 @@ +"""Terraform validation for the module next to this package. + +``terraform init -backend=false`` may need network access to download providers. +Uses ``TF_DATA_DIR`` in a temp directory so the repo tree is not modified. 
+""" + +from __future__ import annotations + +import os +import shutil +import subprocess +import tempfile +from pathlib import Path + +import pytest + +INFRA_GCP = Path(__file__).resolve().parent.parent + + +def _which(name: str) -> str | None: + return shutil.which(name) + + +def test_terraform_validates() -> None: + tf = _which("terraform") + if not tf: + pytest.skip("terraform not on PATH") + assert INFRA_GCP.is_dir() + with tempfile.TemporaryDirectory() as tmp: + env = dict(os.environ) + env["TF_DATA_DIR"] = tmp + init = subprocess.run( + [tf, "init", "-backend=false", "-input=false"], + cwd=INFRA_GCP, + capture_output=True, + text=True, + env=env, + timeout=180, + ) + if init.returncode != 0: + pytest.skip( + "terraform init failed (terraform missing or no network for providers?):\n" + f"{init.stdout}\n{init.stderr}" + ) + validate = subprocess.run( + [tf, "validate"], + cwd=INFRA_GCP, + capture_output=True, + text=True, + env=env, + timeout=60, + ) + assert validate.returncode == 0, f"terraform validate failed:\n{validate.stdout}\n{validate.stderr}" diff --git a/infra/gcp/variables.tf b/infra/gcp/variables.tf new file mode 100644 index 0000000..9237f90 --- /dev/null +++ b/infra/gcp/variables.tf @@ -0,0 +1,258 @@ +# GCP Terraform variables for SyncBot (see docs/INFRA_CONTRACT.md) +# +# Sections: project / region / stage → database mode → Cloud Run → keep-warm → +# Secret Manager IDs and scope envs → optional overrides. + +variable "project_id" { + type = string + description = "GCP project ID" +} + +variable "region" { + type = string + default = "us-central1" + description = "Primary region for Cloud Run and optional Cloud SQL" +} + +variable "stage" { + type = string + default = "test" + description = "Stage name (e.g. 
test, prod); used for resource naming" +} + +# --------------------------------------------------------------------------- +# Database: use existing or create Cloud SQL +# --------------------------------------------------------------------------- + +variable "use_existing_database" { + type = bool + default = false + description = "If true, do not create Cloud SQL; app uses existing_db_host/schema/user/password" +} + +variable "existing_db_host" { + type = string + default = "" + description = "Existing MySQL host (required when use_existing_database = true)" +} + +variable "existing_db_schema" { + type = string + default = "syncbot" + description = "Existing MySQL schema name (when use_existing_database = true)" +} + +variable "existing_db_user" { + type = string + default = "" + description = "Existing MySQL user (when use_existing_database = true)" +} + +# --------------------------------------------------------------------------- +# Cloud Run +# --------------------------------------------------------------------------- + +variable "cloud_run_image" { + type = string + default = "" + description = "Container image URL for Cloud Run (e.g. gcr.io/PROJECT/syncbot:latest). Set after first build or by CI." + + validation { + condition = trimspace(var.cloud_run_image) != "" + error_message = "cloud_run_image is required. Build/push the SyncBot image and pass -var=cloud_run_image=<image-url>." 
+ } +} + +variable "cloud_run_cpu" { + type = string + default = "1" + description = "CPU allocation for Cloud Run service" +} + +variable "cloud_run_memory" { + type = string + default = "512Mi" + description = "Memory allocation for Cloud Run service" +} + +variable "cloud_run_min_instances" { + type = number + default = 0 + description = "Minimum number of instances (0 allows scale-to-zero)" +} + +variable "cloud_run_max_instances" { + type = number + default = 10 + description = "Maximum number of Cloud Run instances" +} + +variable "log_level" { + type = string + default = "INFO" + description = "Python logging level for the app (LOG_LEVEL). DEBUG, INFO, WARNING, ERROR, or CRITICAL." + + validation { + condition = contains(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], var.log_level) + error_message = "log_level must be DEBUG, INFO, WARNING, ERROR, or CRITICAL." + } +} + +# --------------------------------------------------------------------------- +# Keep-warm (Cloud Scheduler) +# --------------------------------------------------------------------------- + +variable "enable_keep_warm" { + type = bool + default = true + description = "Create a Cloud Scheduler job that pings the service periodically" +} + +variable "keep_warm_interval_minutes" { + type = number + default = 5 + description = "Interval in minutes for keep-warm ping" +} + +# --------------------------------------------------------------------------- +# Secrets: names only; values are set outside Terraform (gcloud or console) +# --------------------------------------------------------------------------- + +variable "secret_slack_signing_secret" { + type = string + default = "syncbot-slack-signing-secret" + description = "Secret Manager secret ID for SLACK_SIGNING_SECRET" +} + +variable "secret_slack_client_id" { + type = string + default = "syncbot-slack-client-id" + description = "Secret Manager secret ID for SLACK_CLIENT_ID" +} + +variable "secret_slack_client_secret" { + type = string + 
default = "syncbot-slack-client-secret" + description = "Secret Manager secret ID for SLACK_CLIENT_SECRET" +} + +variable "secret_slack_bot_scopes" { + type = string + default = "syncbot-slack-scopes" + description = "Secret Manager secret ID whose value is comma-separated bot OAuth scopes (runtime env SLACK_BOT_SCOPES)" +} + +variable "slack_user_scopes" { + type = string + default = "chat:write,channels:history,channels:read,files:read,files:write,groups:history,groups:read,groups:write,im:write,reactions:read,reactions:write,team:read,users:read,users:read.email" + description = "Comma-separated user OAuth scopes for Cloud Run (SLACK_USER_SCOPES). Must match slack-manifest.json oauth_config.scopes.user and syncbot/slack_manifest_scopes.py USER_SCOPES; default matches repo standard (same string as AWS SAM SlackOauthUserScopes Default)." +} + +variable "secret_token_encryption_key" { + type = string + default = "syncbot-token-encryption-key" + description = "Secret Manager secret ID for TOKEN_ENCRYPTION_KEY" +} + +variable "token_encryption_key_override" { + type = string + default = "" + sensitive = true + description = "Optional disaster-recovery override for TOKEN_ENCRYPTION_KEY. Leave empty for normal deploys." +} + +variable "secret_db_password" { + type = string + default = "syncbot-db-password" + description = "Secret Manager secret ID for DATABASE_PASSWORD (used when use_existing_database = true or with Cloud SQL)" +} + +# --------------------------------------------------------------------------- +# Runtime plain env (Cloud Run) — parity with infra/aws/template.yaml +# --------------------------------------------------------------------------- + +variable "database_backend" { + type = string + default = "mysql" + description = "DATABASE_BACKEND; Cloud SQL in this stack is MySQL 8." + + validation { + condition = contains(["mysql", "postgresql"], var.database_backend) + error_message = "database_backend must be mysql or postgresql." 
+ } +} + +variable "database_port" { + type = string + default = "3306" + description = "DATABASE_PORT for MySQL (default 3306)." +} + +variable "require_admin" { + type = string + default = "true" + description = "REQUIRE_ADMIN: true or false." + + validation { + condition = contains(["true", "false"], var.require_admin) + error_message = "require_admin must be true or false." + } +} + +variable "soft_delete_retention_days" { + type = number + default = 30 + description = "SOFT_DELETE_RETENTION_DAYS (minimum 1)." + + validation { + condition = var.soft_delete_retention_days >= 1 + error_message = "soft_delete_retention_days must be at least 1." + } +} + +variable "syncbot_federation_enabled" { + type = bool + default = false + description = "SYNCBOT_FEDERATION_ENABLED (maps to string true/false in env)." +} + +variable "syncbot_instance_id" { + type = string + default = "" + description = "SYNCBOT_INSTANCE_ID; leave empty for app auto-generation." +} + +variable "syncbot_public_url_override" { + type = string + default = "" + description = "SYNCBOT_PUBLIC_URL (HTTPS base, no path). Set after first deploy if using federation; empty omits the env var." +} + +variable "primary_workspace" { + type = string + default = "" + description = "PRIMARY_WORKSPACE Slack Team ID; required for backup/restore to appear. Empty omits the env var and hides backup/restore." +} + +variable "enable_db_reset" { + type = string + default = "" + description = "ENABLE_DB_RESET: set to \"true\" for Reset Database when PRIMARY_WORKSPACE matches; empty omits." +} + +variable "database_tls_enabled" { + type = string + default = "" + description = "DATABASE_TLS_ENABLED; empty = app default (TLS on outside local dev)." + + validation { + condition = contains(["", "true", "false"], var.database_tls_enabled) + error_message = "database_tls_enabled must be empty, true, or false." 
+ } +} + +variable "database_ssl_ca_path" { + type = string + default = "" + description = "DATABASE_SSL_CA_PATH when TLS is on; empty omits (app default CA path)." +} diff --git a/poetry.lock b/poetry.lock index aca34c8..7ea3e3b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,254 +1,312 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. [[package]] -name = "appnope" -version = "0.1.3" -description = "Disable App Nap on macOS >= 10.9" +name = "alembic" +version = "1.18.4" +description = "A database migration tool for SQLAlchemy." optional = false -python-versions = "*" +python-versions = ">=3.10" +groups = ["main"] files = [ - {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, - {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, -] - -[[package]] -name = "asttokens" -version = "2.4.0" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = "*" -files = [ - {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"}, - {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"}, + {file = "alembic-1.18.4-py3-none-any.whl", hash = "sha256:a5ed4adcf6d8a4cb575f3d759f071b03cd6e5c7618eb796cb52497be25bfe19a"}, + {file = "alembic-1.18.4.tar.gz", hash = "sha256:cb6e1fd84b6174ab8dbb2329f86d631ba9559dd78df550b57804d607672cedbc"}, ] [package.dependencies] -six = ">=1.12.0" +Mako = "*" +SQLAlchemy = ">=1.4.23" +typing-extensions = ">=4.12" [package.extras] -test = ["astroid", "pytest"] - -[[package]] -name = "backcall" -version = "0.2.0" -description = "Specifications for callback functions passed in to an API" -optional = false 
-python-versions = "*" -files = [ - {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, - {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, -] +tz = ["tzdata"] [[package]] name = "boto3" -version = "1.28.60" +version = "1.42.76" description = "The AWS SDK for Python" optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "boto3-1.28.60-py3-none-any.whl", hash = "sha256:d5f270c2c9a051f78c308cbba4268458e8df441057b73ba140742707ac1bc7ea"}, - {file = "boto3-1.28.60.tar.gz", hash = "sha256:dccb49cc10b31314b8553c6c9614c44b2249e0d0285d73f608a5d2010f6e1d82"}, + {file = "boto3-1.42.76-py3-none-any.whl", hash = "sha256:63c6779c814847016b89ae1b72ed968f8a63d80e589ba337511aa6fc1b59585e"}, + {file = "boto3-1.42.76.tar.gz", hash = "sha256:aa2b1973eee8973a9475d24bb579b1dee7176595338d4e4f7880b5c6189b8814"}, ] [package.dependencies] -botocore = ">=1.31.60,<1.32.0" +botocore = ">=1.42.76,<1.43.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.7.0,<0.8.0" +s3transfer = ">=0.16.0,<0.17.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.31.60" +version = "1.42.76" description = "Low-level, data-driven core of boto 3." 
optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "botocore-1.31.60-py3-none-any.whl", hash = "sha256:b6de7a6a03ca3da18b78615a2cb5221c9fdb9483d3f50cb4281ae038b3f22d9f"}, - {file = "botocore-1.31.60.tar.gz", hash = "sha256:578470a15a5bd64f67437a81f23feccba85084167acf63c56acada2c1c1d95d8"}, + {file = "botocore-1.42.76-py3-none-any.whl", hash = "sha256:151e714ae3c32f68ea0b4dc60751401e03f84a87c6cf864ea0ee64aa10eb4607"}, + {file = "botocore-1.42.76.tar.gz", hash = "sha256:c553fa0ae29e36a5c407f74da78b78404b81b74b15fb62bf640a3cd9385f0874"}, ] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = ">=1.25.4,<1.27" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.16.26)"] +crt = ["awscrt (==0.31.2)"] [[package]] name = "certifi" -version = "2023.7.22" +version = "2026.2.25" description = "Python package for providing Mozilla's CA Bundle." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, + {file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"}, + {file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"}, ] [[package]] name = "cffi" -version = "1.16.0" +version = "2.0.0" description = "Foreign Function Interface for Python calling C code." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\"" files = [ - {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, - {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, - {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, - {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, - {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, - {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, - {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, - {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, - {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, - {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, - {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, - {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, - {file = 
"cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, - {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, - {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, - {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, - {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, - {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, - {file = 
"cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, - {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, - {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, - {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, - {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, - {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, - {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, - {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, - {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, - {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = 
"sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + 
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + 
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, ] [package.dependencies] -pycparser = "*" +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} [[package]] name = "charset-normalizer" -version = "3.3.0" +version = "3.4.6" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"}, - {file = 
"charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"}, - {file = 
"charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"}, - {file = 
"charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"}, - {file = 
"charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"}, - {file = 
"charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"}, - {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e1d8ca8611099001949d1cdfaefc510cf0f212484fe7c565f735b68c78c3c95"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e25369dc110d58ddf29b949377a93e0716d72a24f62bad72b2b39f155949c1fd"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:259695e2ccc253feb2a016303543d691825e920917e31f894ca1a687982b1de4"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dda86aba335c902b6149a02a55b38e96287157e609200811837678214ba2b1db"}, + {file = 
"charset_normalizer-3.4.6-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51fb3c322c81d20567019778cb5a4a6f2dc1c200b886bc0d636238e364848c89"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:4482481cb0572180b6fd976a4d5c72a30263e98564da68b86ec91f0fe35e8565"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:39f5068d35621da2881271e5c3205125cc456f54e9030d3f723288c873a71bf9"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8bea55c4eef25b0b19a0337dc4e3f9a15b00d569c77211fa8cde38684f234fb7"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f0cdaecd4c953bfae0b6bb64910aaaca5a424ad9c72d85cb88417bb9814f7550"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:150b8ce8e830eb7ccb029ec9ca36022f756986aaaa7956aad6d9ec90089338c0"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:e68c14b04827dd76dcbd1aeea9e604e3e4b78322d8faf2f8132c7138efa340a8"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3778fd7d7cd04ae8f54651f4a7a0bd6e39a0cf20f801720a4c21d80e9b7ad6b0"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dad6e0f2e481fffdcf776d10ebee25e0ef89f16d691f1e5dee4b586375fdc64b"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win32.whl", hash = "sha256:74a2e659c7ecbc73562e2a15e05039f1e22c75b7c7618b4b574a3ea9118d1557"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:aa9cccf4a44b9b62d8ba8b4dd06c649ba683e4bf04eea606d2e94cfc2d6ff4d6"}, + {file = "charset_normalizer-3.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:e985a16ff513596f217cee86c21371b8cd011c0f6f056d0920aa2d926c544058"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-macosx_10_9_universal2.whl", 
hash = "sha256:82060f995ab5003a2d6e0f4ad29065b7672b6593c8c63559beefe5b443242c3e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60c74963d8350241a79cb8feea80e54d518f72c26db618862a8f53e5023deaf9"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6e4333fb15c83f7d1482a76d45a0818897b3d33f00efd215528ff7c51b8e35d"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bc72863f4d9aba2e8fd9085e63548a324ba706d2ea2c83b260da08a59b9482de"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9cc4fc6c196d6a8b76629a70ddfcd4635a6898756e2d9cac5565cf0654605d73"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:0c173ce3a681f309f31b87125fecec7a5d1347261ea11ebbb856fa6006b23c8c"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c907cdc8109f6c619e6254212e794d6548373cc40e1ec75e6e3823d9135d29cc"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:404a1e552cf5b675a87f0651f8b79f5f1e6fd100ee88dc612f89aa16abd4486f"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e3c701e954abf6fc03a49f7c579cc80c2c6cc52525340ca3186c41d3f33482ef"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7a6967aaf043bceabab5412ed6bd6bd26603dae84d5cb75bf8d9a74a4959d398"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5feb91325bbceade6afab43eb3b508c63ee53579fe896c77137ded51c6b6958e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:f820f24b09e3e779fe84c3c456cb4108a7aa639b0d1f02c28046e11bfcd088ed"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b35b200d6a71b9839a46b9b7fff66b6638bb52fc9658aa58796b0326595d3021"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win32.whl", hash = "sha256:9ca4c0b502ab399ef89248a2c84c54954f77a070f28e546a85e91da627d1301e"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:a9e68c9d88823b274cf1e72f28cb5dc89c990edf430b0bfd3e2fb0785bfeabf4"}, + {file = "charset_normalizer-3.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:97d0235baafca5f2b09cf332cc275f021e694e8362c6bb9c96fc9a0eb74fc316"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a"}, + {file = 
"charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = "sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb"}, + {file = "charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf"}, + {file = 
"charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d"}, + {file = "charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389"}, + {file = 
"charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0"}, + {file = 
"charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = 
"sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4"}, + {file = "charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:659a1e1b500fac8f2779dd9e1570464e012f43e580371470b45277a27baa7532"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:f61aa92e4aad0be58eb6eb4e0c21acf32cf8065f4b2cae5665da756c4ceef982"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f50498891691e0864dc3da965f340fada0771f6142a378083dc4608f4ea513e2"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bf625105bb9eef28a56a943fec8c8a98aeb80e7d7db99bd3c388137e6eb2d237"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bd9d128ef93637a5d7a6af25363cf5dec3fa21cf80e68055aad627f280e8afa"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_armv7l.whl", hash = "sha256:d08ec48f0a1c48d75d0356cea971921848fb620fdeba805b28f937e90691209f"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ed80ff870ca6de33f4d953fda4d55654b9a2b340ff39ab32fa3adbcd718f264"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f98059e4fcd3e3e4e2d632b7cf81c2faae96c43c60b569e9c621468082f1d104"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:ab30e5e3e706e3063bc6de96b118688cb10396b70bb9864a430f67df98c61ecc"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d5f5d1e9def3405f60e3ca8232d56f35c98fb7bf581efcc60051ebf53cb8b611"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:461598cd852bfa5a61b09cae2b1c02e2efcd166ee5516e243d540ac24bfa68a7"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:71be7e0e01753a89cf024abf7ecb6bca2c81738ead80d43004d9b5e3f1244e64"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:df01808ee470038c3f8dc4f48620df7225c49c2d6639e38f96e6d6ac6e6f7b0e"}, + {file = 
"charset_normalizer-3.4.6-cp38-cp38-win32.whl", hash = "sha256:69dd852c2f0ad631b8b60cfbe25a28c0058a894de5abb566619c205ce0550eae"}, + {file = "charset_normalizer-3.4.6-cp38-cp38-win_amd64.whl", hash = "sha256:517ad0e93394ac532745129ceabdf2696b609ec9f87863d337140317ebce1c14"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31215157227939b4fb3d740cd23fe27be0439afef67b785a1eb78a3ae69cba9e"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecbbd45615a6885fe3240eb9db73b9e62518b611850fdf8ab08bd56de7ad2b17"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c45a03a4c69820a399f1dda9e1d8fbf3562eda46e7720458180302021b08f778"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e8aeb10fcbe92767f0fa69ad5a72deca50d0dca07fbde97848997d778a50c9fe"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54fae94be3d75f3e573c9a1b5402dc593de19377013c9a0e4285e3d402dd3a2a"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:2f7fdd9b6e6c529d6a2501a2d36b240109e78a8ceaef5687cfcfa2bbe671d297"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d1d02209e06550bdaef34af58e041ad71b88e624f5d825519da3a3308e22687"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bc5f0687d796c05b1e28ab0d38a50e6309906ee09375dd3aff6a9c09dd6e8f4"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ee4ec14bc1680d6b0afab9aea2ef27e26d2024f18b24a2d7155a52b60da7e833"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = 
"sha256:d1a2ee9c1499fc8f86f4521f27a973c914b211ffa87322f4ee33bb35392da2c5"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:48696db7f18afb80a068821504296eb0787d9ce239b91ca15059d1d3eaacf13b"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4f41da960b196ea355357285ad1316a00099f22d0929fe168343b99b254729c9"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:802168e03fba8bbc5ce0d866d589e4b1ca751d06edee69f7f3a19c5a9fe6b597"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win32.whl", hash = "sha256:8761ac29b6c81574724322a554605608a9960769ea83d2c73e396f3df896ad54"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:1cf0a70018692f85172348fe06d3a4b63f94ecb055e13a00c644d368eb82e5b8"}, + {file = "charset_normalizer-3.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:3516bbb8d42169de9e61b8520cbeeeb716f12f4ecfe3fd30a9919aa16c806ca8"}, + {file = "charset_normalizer-3.4.6-py3-none-any.whl", hash = "sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69"}, + {file = "charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6"}, ] [[package]] @@ -257,1087 +315,728 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "comm" -version = "0.1.4" -description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "comm-0.1.4-py3-none-any.whl", hash = "sha256:6d52794cba11b36ed9860999cd10fd02d6b2eac177068fdd585e1e2f8a96e67a"}, - {file = "comm-0.1.4.tar.gz", hash = "sha256:354e40a59c9dd6db50c5cc6b4acc887d82e9603787f83b68c01a80a923984d15"}, -] - -[package.dependencies] -traitlets = ">=4" - -[package.extras] -lint = ["black (>=22.6.0)", "mdformat (>0.7)", "mdformat-gfm (>=0.3.5)", "ruff (>=0.0.156)"] -test = ["pytest"] -typing = ["mypy (>=0.990)"] - [[package]] name = "cryptography" -version = "41.0.4" +version = "46.0.6" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false -python-versions = ">=3.7" +python-versions = "!=3.9.0,!=3.9.1,>=3.8" +groups = ["main"] files = [ - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839"}, - {file = "cryptography-41.0.4-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13"}, - {file = "cryptography-41.0.4-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397"}, - {file = "cryptography-41.0.4-cp37-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860"}, - {file = "cryptography-41.0.4-cp37-abi3-win32.whl", hash = "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd"}, - {file = "cryptography-41.0.4-cp37-abi3-win_amd64.whl", hash = "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829"}, - {file = "cryptography-41.0.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9"}, - {file = "cryptography-41.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", 
hash = "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6"}, - {file = "cryptography-41.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311"}, - {file = "cryptography-41.0.4.tar.gz", hash = "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a"}, + {file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"}, + {file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"}, + {file = 
"cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"}, + {file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"}, + {file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"}, + {file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"}, + {file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = 
"sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"}, + {file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"}, + {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"}, + {file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"}, + {file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"}, + {file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"}, + {file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"}, + {file = 
"cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"}, + {file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"}, + {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"}, + {file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"}, + {file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"}, + {file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"}, + {file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"}, + {file = 
"cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"}, ] [package.dependencies] -cffi = ">=1.12" +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} [package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] -sdist = ["build"] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox[uv] (>=2024.4.15)"] +pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] +sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "debugpy" -version = "1.8.0" -description = "An implementation of the Debug Adapter Protocol for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "debugpy-1.8.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:7fb95ca78f7ac43393cd0e0f2b6deda438ec7c5e47fa5d38553340897d2fbdfb"}, - {file = "debugpy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef9ab7df0b9a42ed9c878afd3eaaff471fce3fa73df96022e1f5c9f8f8c87ada"}, - {file = "debugpy-1.8.0-cp310-cp310-win32.whl", hash = "sha256:a8b7a2fd27cd9f3553ac112f356ad4ca93338feadd8910277aff71ab24d8775f"}, - {file = "debugpy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5d9de202f5d42e62f932507ee8b21e30d49aae7e46d5b1dd5c908db1d7068637"}, - {file = 
"debugpy-1.8.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:ef54404365fae8d45cf450d0544ee40cefbcb9cb85ea7afe89a963c27028261e"}, - {file = "debugpy-1.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60009b132c91951354f54363f8ebdf7457aeb150e84abba5ae251b8e9f29a8a6"}, - {file = "debugpy-1.8.0-cp311-cp311-win32.whl", hash = "sha256:8cd0197141eb9e8a4566794550cfdcdb8b3db0818bdf8c49a8e8f8053e56e38b"}, - {file = "debugpy-1.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:a64093656c4c64dc6a438e11d59369875d200bd5abb8f9b26c1f5f723622e153"}, - {file = "debugpy-1.8.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:b05a6b503ed520ad58c8dc682749113d2fd9f41ffd45daec16e558ca884008cd"}, - {file = "debugpy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c6fb41c98ec51dd010d7ed650accfd07a87fe5e93eca9d5f584d0578f28f35f"}, - {file = "debugpy-1.8.0-cp38-cp38-win32.whl", hash = "sha256:46ab6780159eeabb43c1495d9c84cf85d62975e48b6ec21ee10c95767c0590aa"}, - {file = "debugpy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:bdc5ef99d14b9c0fcb35351b4fbfc06ac0ee576aeab6b2511702e5a648a2e595"}, - {file = "debugpy-1.8.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:61eab4a4c8b6125d41a34bad4e5fe3d2cc145caecd63c3fe953be4cc53e65bf8"}, - {file = "debugpy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:125b9a637e013f9faac0a3d6a82bd17c8b5d2c875fb6b7e2772c5aba6d082332"}, - {file = "debugpy-1.8.0-cp39-cp39-win32.whl", hash = "sha256:57161629133113c97b387382045649a2b985a348f0c9366e22217c87b68b73c6"}, - {file = "debugpy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:e3412f9faa9ade82aa64a50b602544efcba848c91384e9f93497a458767e6926"}, - {file = "debugpy-1.8.0-py2.py3-none-any.whl", hash = "sha256:9c9b0ac1ce2a42888199df1a1906e45e6f3c9555497643a85e0bf2406e3ffbc4"}, - {file = "debugpy-1.8.0.zip", hash = "sha256:12af2c55b419521e33d5fb21bd022df0b5eb267c3e178f1d374a63a2a6bdccd0"}, -] - -[[package]] -name = 
"decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "executing" -version = "2.0.0" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = "*" -files = [ - {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"}, - {file = "executing-2.0.0.tar.gz", hash = "sha256:0ff053696fdeef426cda5bd18eacd94f82c91f49823a2e9090124212ceea9b08"}, -] - -[package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] - [[package]] name = "greenlet" -version = "3.0.0" +version = "3.3.2" description = "Lightweight in-process concurrent programming" optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" files = [ - {file = "greenlet-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e09dea87cc91aea5500262993cbd484b41edf8af74f976719dd83fe724644cd6"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47932c434a3c8d3c86d865443fadc1fbf574e9b11d6650b656e602b1797908a"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdfaeecf8cc705d35d8e6de324bf58427d7eafb55f67050d8f28053a3d57118c"}, - {file = 
"greenlet-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a68d670c8f89ff65c82b936275369e532772eebc027c3be68c6b87ad05ca695"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ad562a104cd41e9d4644f46ea37167b93190c6d5e4048fcc4b80d34ecb278f"}, - {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a807b2a58d5cdebb07050efe3d7deaf915468d112dfcf5e426d0564aa3aa4a"}, - {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1660a15a446206c8545edc292ab5c48b91ff732f91b3d3b30d9a915d5ec4779"}, - {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:813720bd57e193391dfe26f4871186cf460848b83df7e23e6bef698a7624b4c9"}, - {file = "greenlet-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:aa15a2ec737cb609ed48902b45c5e4ff6044feb5dcdfcf6fa8482379190330d7"}, - {file = "greenlet-3.0.0-cp310-universal2-macosx_11_0_x86_64.whl", hash = "sha256:7709fd7bb02b31908dc8fd35bfd0a29fc24681d5cc9ac1d64ad07f8d2b7db62f"}, - {file = "greenlet-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:211ef8d174601b80e01436f4e6905aca341b15a566f35a10dd8d1e93f5dbb3b7"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6512592cc49b2c6d9b19fbaa0312124cd4c4c8a90d28473f86f92685cc5fef8e"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:871b0a8835f9e9d461b7fdaa1b57e3492dd45398e87324c047469ce2fc9f516c"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b505fcfc26f4148551826a96f7317e02c400665fa0883fe505d4fcaab1dabfdd"}, - {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123910c58234a8d40eaab595bc56a5ae49bdd90122dde5bdc012c20595a94c14"}, - {file = 
"greenlet-3.0.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:96d9ea57292f636ec851a9bb961a5cc0f9976900e16e5d5647f19aa36ba6366b"}, - {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, - {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, - {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, - {file = "greenlet-3.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1482fba7fbed96ea7842b5a7fc11d61727e8be75a077e603e8ab49d24e234383"}, - {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d363666acc21d2c204dd8705c0e0457d7b2ee7a76cb16ffc099d6799744ac99"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:334ef6ed8337bd0b58bb0ae4f7f2dcc84c9f116e474bb4ec250a8bb9bd797a66"}, - {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6672fdde0fd1a60b44fb1751a7779c6db487e42b0cc65e7caa6aa686874e79fb"}, - {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, - {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, - {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e52a712c38e5fb4fd68e00dc3caf00b60cb65634d50e32281a9d6431b33b4af1"}, - {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5539f6da3418c3dc002739cb2bb8d169056aa66e0c83f6bacae0cd3ac26b423"}, - {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:343675e0da2f3c69d3fb1e894ba0a1acf58f481f3b9372ce1eb465ef93cf6fed"}, - {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:abe1ef3d780de56defd0c77c5ba95e152f4e4c4e12d7e11dd8447d338b85a625"}, - {file = "greenlet-3.0.0-cp37-cp37m-win32.whl", hash = "sha256:e693e759e172fa1c2c90d35dea4acbdd1d609b6936115d3739148d5e4cd11947"}, - {file = "greenlet-3.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bdd696947cd695924aecb3870660b7545a19851f93b9d327ef8236bfc49be705"}, - {file = "greenlet-3.0.0-cp37-universal2-macosx_11_0_x86_64.whl", hash = "sha256:cc3e2679ea13b4de79bdc44b25a0c4fcd5e94e21b8f290791744ac42d34a0353"}, - {file = "greenlet-3.0.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:63acdc34c9cde42a6534518e32ce55c30f932b473c62c235a466469a710bfbf9"}, - {file = 
"greenlet-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a1a6244ff96343e9994e37e5b4839f09a0207d35ef6134dce5c20d260d0302c"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b822fab253ac0f330ee807e7485769e3ac85d5eef827ca224feaaefa462dc0d0"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8060b32d8586e912a7b7dac2d15b28dbbd63a174ab32f5bc6d107a1c4143f40b"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:621fcb346141ae08cb95424ebfc5b014361621b8132c48e538e34c3c93ac7365"}, - {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb36985f606a7c49916eff74ab99399cdfd09241c375d5a820bb855dfb4af9f"}, - {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10b5582744abd9858947d163843d323d0b67be9432db50f8bf83031032bc218d"}, - {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f351479a6914fd81a55c8e68963609f792d9b067fb8a60a042c585a621e0de4f"}, - {file = "greenlet-3.0.0-cp38-cp38-win32.whl", hash = "sha256:9de687479faec7db5b198cc365bc34addd256b0028956501f4d4d5e9ca2e240a"}, - {file = "greenlet-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:3fd2b18432e7298fcbec3d39e1a0aa91ae9ea1c93356ec089421fabc3651572b"}, - {file = "greenlet-3.0.0-cp38-universal2-macosx_11_0_x86_64.whl", hash = "sha256:3c0d36f5adc6e6100aedbc976d7428a9f7194ea79911aa4bf471f44ee13a9464"}, - {file = "greenlet-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4cd83fb8d8e17633ad534d9ac93719ef8937568d730ef07ac3a98cb520fd93e4"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5b2d4cdaf1c71057ff823a19d850ed5c6c2d3686cb71f73ae4d6382aaa7a06"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2e7dcdfad252f2ca83c685b0fa9fba00e4d8f243b73839229d56ee3d9d219314"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94e4e924d09b5a3e37b853fe5924a95eac058cb6f6fb437ebb588b7eda79870"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6fb737e46b8bd63156b8f59ba6cdef46fe2b7db0c5804388a2d0519b8ddb99"}, - {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d55db1db455c59b46f794346efce896e754b8942817f46a1bada2d29446e305a"}, - {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56867a3b3cf26dc8a0beecdb4459c59f4c47cdd5424618c08515f682e1d46692"}, - {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a812224a5fb17a538207e8cf8e86f517df2080c8ee0f8c1ed2bdaccd18f38f4"}, - {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, - {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, - {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, - {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, + {file = "greenlet-3.3.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9bc885b89709d901859cf95179ec9f6bb67a3d2bb1f0e88456461bd4b7f8fd0d"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b568183cf65b94919be4438dc28416b234b678c608cafac8874dfeeb2a9bbe13"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:527fec58dc9f90efd594b9b700662ed3fb2493c2122067ac9c740d98080a620e"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:508c7f01f1791fbc8e011bd508f6794cb95397fdb198a46cb6635eb5b78d85a7"}, + {file = "greenlet-3.3.2-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ad0c8917dd42a819fe77e6bdfcb84e3379c0de956469301d9fd36427a1ca501f"}, + {file = "greenlet-3.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:97245cc10e5515dbc8c3104b2928f7f02b6813002770cfaffaf9a6e0fc2b94ef"}, + {file = "greenlet-3.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8c1fdd7d1b309ff0da81d60a9688a8bd044ac4e18b250320a96fc68d31c209ca"}, + {file = "greenlet-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:5d0e35379f93a6d0222de929a25ab47b5eb35b5ef4721c2b9cbcc4036129ff1f"}, + {file = "greenlet-3.3.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:c56692189a7d1c7606cb794be0a8381470d95c57ce5be03fb3d0ef57c7853b86"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ebd458fa8285960f382841da585e02201b53a5ec2bac6b156fc623b5ce4499f"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a443358b33c4ec7b05b79a7c8b466f5d275025e750298be7340f8fc63dff2a55"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:4375a58e49522698d3e70cc0b801c19433021b5c37686f7ce9c65b0d5c8677d2"}, + {file = "greenlet-3.3.2-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e2cd90d413acbf5e77ae41e5d3c9b3ac1d011a756d7284d7f3f2b806bbd6358"}, + {file = "greenlet-3.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:442b6057453c8cb29b4fb36a2ac689382fc71112273726e2423f7f17dc73bf99"}, + {file = "greenlet-3.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:45abe8eb6339518180d5a7fa47fa01945414d7cca5ecb745346fc6a87d2750be"}, + {file = "greenlet-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e692b2dae4cc7077cbb11b47d258533b48c8fde69a33d0d8a82e2fe8d8531d5"}, + {file = 
"greenlet-3.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:02b0a8682aecd4d3c6c18edf52bc8e51eacdd75c8eac52a790a210b06aa295fd"}, + {file = "greenlet-3.3.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:ac8d61d4343b799d1e526db579833d72f23759c71e07181c2d2944e429eb09cd"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3ceec72030dae6ac0c8ed7591b96b70410a8be370b6a477b1dbc072856ad02bd"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2a5be83a45ce6188c045bcc44b0ee037d6a518978de9a5d97438548b953a1ac"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ae9e21c84035c490506c17002f5c8ab25f980205c3e61ddb3a2a2a2e6c411fcb"}, + {file = "greenlet-3.3.2-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43e99d1749147ac21dde49b99c9abffcbc1e2d55c67501465ef0930d6e78e070"}, + {file = "greenlet-3.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c956a19350e2c37f2c48b336a3afb4bff120b36076d9d7fb68cb44e05d95b79"}, + {file = "greenlet-3.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6c6f8ba97d17a1e7d664151284cb3315fc5f8353e75221ed4324f84eb162b395"}, + {file = "greenlet-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:34308836d8370bddadb41f5a7ce96879b72e2fdfb4e87729330c6ab52376409f"}, + {file = "greenlet-3.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:d3a62fa76a32b462a97198e4c9e99afb9ab375115e74e9a83ce180e7a496f643"}, + {file = "greenlet-3.3.2-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:aa6ac98bdfd716a749b84d4034486863fd81c3abde9aa3cf8eff9127981a4ae4"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab0c7e7901a00bc0a7284907273dc165b32e0d109a6713babd04471327ff7986"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:d248d8c23c67d2291ffd47af766e2a3aa9fa1c6703155c099feb11f526c63a92"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ccd21bb86944ca9be6d967cf7691e658e43417782bce90b5d2faeda0ff78a7dd"}, + {file = "greenlet-3.3.2-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b6997d360a4e6a4e936c0f9625b1c20416b8a0ea18a8e19cabbefc712e7397ab"}, + {file = "greenlet-3.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:64970c33a50551c7c50491671265d8954046cb6e8e2999aacdd60e439b70418a"}, + {file = "greenlet-3.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1a9172f5bf6bd88e6ba5a84e0a68afeac9dc7b6b412b245dd64f52d83c81e55b"}, + {file = "greenlet-3.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:a7945dd0eab63ded0a48e4dcade82939783c172290a7903ebde9e184333ca124"}, + {file = "greenlet-3.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:394ead29063ee3515b4e775216cb756b2e3b4a7e55ae8fd884f17fa579e6b327"}, + {file = "greenlet-3.3.2-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:8d1658d7291f9859beed69a776c10822a0a799bc4bfe1bd4272bb60e62507dab"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:18cb1b7337bca281915b3c5d5ae19f4e76d35e1df80f4ad3c1a7be91fadf1082"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c2e47408e8ce1c6f1ceea0dffcdf6ebb85cc09e55c7af407c99f1112016e45e9"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e3cb43ce200f59483eb82949bf1835a99cf43d7571e900d7c8d5c62cdf25d2f9"}, + {file = "greenlet-3.3.2-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63d10328839d1973e5ba35e98cccbca71b232b14051fd957b6f8b6e8e80d0506"}, + {file = "greenlet-3.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8e4ab3cfb02993c8cc248ea73d7dae6cec0253e9afa311c9b37e603ca9fad2ce"}, + {file = 
"greenlet-3.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:94ad81f0fd3c0c0681a018a976e5c2bd2ca2d9d94895f23e7bb1af4e8af4e2d5"}, + {file = "greenlet-3.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:8c4dd0f3997cf2512f7601563cc90dfb8957c0cff1e3a1b23991d4ea1776c492"}, + {file = "greenlet-3.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:cd6f9e2bbd46321ba3bbb4c8a15794d32960e3b0ae2cc4d49a1a53d314805d71"}, + {file = "greenlet-3.3.2-cp314-cp314t-macosx_11_0_universal2.whl", hash = "sha256:e26e72bec7ab387ac80caa7496e0f908ff954f31065b0ffc1f8ecb1338b11b54"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b466dff7a4ffda6ca975979bab80bdadde979e29fc947ac3be4451428d8b0e4"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b8bddc5b73c9720bea487b3bffdb1840fe4e3656fba3bd40aa1489e9f37877ff"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:59b3e2c40f6706b05a9cd299c836c6aa2378cabe25d021acd80f13abf81181cf"}, + {file = "greenlet-3.3.2-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b26b0f4428b871a751968285a1ac9648944cea09807177ac639b030bddebcea4"}, + {file = "greenlet-3.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1fb39a11ee2e4d94be9a76671482be9398560955c9e568550de0224e41104727"}, + {file = "greenlet-3.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:20154044d9085151bc309e7689d6f7ba10027f8f5a8c0676ad398b951913d89e"}, + {file = "greenlet-3.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c04c5e06ec3e022cbfe2cd4a846e1d4e50087444f875ff6d2c2ad8445495cf1a"}, + {file = "greenlet-3.3.2.tar.gz", hash = "sha256:2eaf067fc6d886931c7962e8c6bede15d2f01965560f3359b27c80bde2d151f2"}, ] [package.extras] -docs = ["Sphinx"] -test = ["objgraph", "psutil"] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] [[package]] name = "idna" -version = "3.4" 
+version = "3.11" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "ipykernel" -version = "6.25.2" -description = "IPython Kernel for Jupyter" -optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "ipykernel-6.25.2-py3-none-any.whl", hash = "sha256:2e2ee359baba19f10251b99415bb39de1e97d04e1fab385646f24f0596510b77"}, - {file = "ipykernel-6.25.2.tar.gz", hash = "sha256:f468ddd1f17acb48c8ce67fcfa49ba6d46d4f9ac0438c1f441be7c3d1372230b"}, + {file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"}, + {file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"}, ] -[package.dependencies] -appnope = {version = "*", markers = "platform_system == \"Darwin\""} -comm = ">=0.1.1" -debugpy = ">=1.6.5" -ipython = ">=7.23.1" -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -matplotlib-inline = ">=0.1" -nest-asyncio = "*" -packaging = "*" -psutil = "*" -pyzmq = ">=20" -tornado = ">=6.1" -traitlets = ">=5.4.0" - [package.extras] -cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] -pyqt5 = ["pyqt5"] -pyside6 = ["pyside6"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] [[package]] -name = "ipython" -version = "8.16.1" -description = "IPython: Productive Interactive Computing" 
+name = "iniconfig" +version = "2.3.0" +description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.9" +python-versions = ">=3.10" +groups = ["dev"] files = [ - {file = "ipython-8.16.1-py3-none-any.whl", hash = "sha256:0852469d4d579d9cd613c220af7bf0c9cc251813e12be647cb9d463939db9b1e"}, - {file = "ipython-8.16.1.tar.gz", hash = "sha256:ad52f58fca8f9f848e256c629eff888efc0528c12fe0f8ec14f33205f23ef938"}, + {file = "iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12"}, + {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] -[package.dependencies] -appnope = {version = "*", markers = "sys_platform == \"darwin\""} -backcall = "*" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -decorator = "*" -jedi = ">=0.16" -matplotlib-inline = "*" -pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} -pickleshare = "*" -prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" -pygments = ">=2.4.0" -stack-data = "*" -traitlets = ">=5" - -[package.extras] -all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] -black = ["black"] -doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] -kernel = ["ipykernel"] -nbconvert = ["nbconvert"] -nbformat = ["nbformat"] -notebook = ["ipywidgets", "notebook"] -parallel = ["ipyparallel"] -qtconsole = ["qtconsole"] -test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] 
-test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] - -[[package]] -name = "jedi" -version = "0.19.1" -description = "An autocompletion tool for Python that can be used for text editors." -optional = false -python-versions = ">=3.6" -files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, -] - -[package.dependencies] -parso = ">=0.8.3,<0.9.0" - -[package.extras] -docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] - [[package]] name = "jmespath" -version = "1.0.1" +version = "1.1.0" description = "JSON Matching Expressions" optional = false -python-versions = ">=3.7" -files = [ - {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, - {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, -] - -[[package]] -name = "jupyter-client" -version = "8.3.1" -description = "Jupyter protocol implementation and client libraries" -optional = false -python-versions 
= ">=3.8" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "jupyter_client-8.3.1-py3-none-any.whl", hash = "sha256:5eb9f55eb0650e81de6b7e34308d8b92d04fe4ec41cd8193a913979e33d8e1a5"}, - {file = "jupyter_client-8.3.1.tar.gz", hash = "sha256:60294b2d5b869356c893f57b1a877ea6510d60d45cf4b38057f1672d85699ac9"}, + {file = "jmespath-1.1.0-py3-none-any.whl", hash = "sha256:a5663118de4908c91729bea0acadca56526eb2698e83de10cd116ae0f4e97c64"}, + {file = "jmespath-1.1.0.tar.gz", hash = "sha256:472c87d80f36026ae83c6ddd0f1d05d4e510134ed462851fd5f754c8c3cbb88d"}, ] -[package.dependencies] -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -python-dateutil = ">=2.8.2" -pyzmq = ">=23.0" -tornado = ">=6.2" -traitlets = ">=5.3" - -[package.extras] -docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] - [[package]] -name = "jupyter-core" -version = "5.3.2" -description = "Jupyter core package. A base package on which Jupyter projects rely." +name = "mako" +version = "1.3.10" +description = "A super-fast templating language that borrows the best ideas from the existing templating languages." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "jupyter_core-5.3.2-py3-none-any.whl", hash = "sha256:a4af53c3fa3f6330cebb0d9f658e148725d15652811d1c32dc0f63bb96f2e6d6"}, - {file = "jupyter_core-5.3.2.tar.gz", hash = "sha256:0c28db6cbe2c37b5b398e1a1a5b22f84fd64cd10afc1f6c05b02fb09481ba45f"}, + {file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"}, + {file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"}, ] [package.dependencies] -platformdirs = ">=2.5" -pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} -traitlets = ">=5.3" +MarkupSafe = ">=0.9.2" [package.extras] -docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] -test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] +babel = ["Babel"] +lingua = ["lingua"] +testing = ["pytest"] [[package]] -name = "matplotlib-inline" -version = "0.1.6" -description = "Inline Matplotlib backend for Jupyter" +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." 
optional = false -python-versions = ">=3.5" -files = [ - {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, - {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, -] - -[package.dependencies] -traitlets = "*" - -[[package]] -name = "nest-asyncio" -version = "1.5.8" -description = "Patch asyncio to allow nested event loops" -optional = false -python-versions = ">=3.5" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"}, - {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, 
+ {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = 
"sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", 
hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = 
"markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = 
"markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, ] [[package]] name = "packaging" -version = "23.2" +version = "26.0" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, + {file = "packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"}, + {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, ] [[package]] -name = "parso" -version = "0.8.3" -description = "A Python Parser" +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "parso-0.8.3-py2.py3-none-any.whl", 
hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, - {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, ] [package.extras] -qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] -testing = ["docopt", "pytest (<6.0.0)"] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] [[package]] -name = "pexpect" -version = "4.8.0" -description = "Pexpect allows easy control of interactive console applications." +name = "psycopg2-binary" +version = "2.9.11" +description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = false -python-versions = "*" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, - {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, + {file = "psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6fe6b47d0b42ce1c9f1fa3e35bb365011ca22e39db37074458f27921dca40f2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c0e4262e089516603a09474ee13eabf09cb65c332277e39af68f6233911087"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:763c93ef1df3da6d1a90f86ea7f3f806dc06b21c198fa87c3c25504abec9404a"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = 
"sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:366df99e710a2acd90efed3764bb1e28df6c675d33a7fb40df9b7281694432ee"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:92e3b669236327083a2e33ccfa0d320dd01b9803b3e14dd986a4fc54aa00f4e1"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e0deeb03da539fa3577fcb0b3f2554a97f7e5477c246098dbb18091a4a01c16f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b52a3f9bb540a3e4ec0f6ba6d31339727b2950c9772850d6545b7eae0b9d7c5"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:db4fd476874ccfdbb630a54426964959e58da4c61c9feba73e6094d51303d7d8"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47f212c1d3be608a12937cc131bd85502954398aaa1320cb4c14421a0ffccf4c"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e35b7abae2b0adab776add56111df1735ccc71406e56203515e228a8dc07089f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fcf21be3ce5f5659daefd2b3b3b6e4727b028221ddc94e6c1523425579664747"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = 
"sha256:9bd81e64e8de111237737b29d68039b9c813bdf520156af36d26819c9a979e5f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:32770a4d666fbdafab017086655bcddab791d7cb260a16679cc5a7338b64343b"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3cb3a676873d7506825221045bd70e0427c905b9c8ee8d6acd70cfcbd6e576d"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:4012c9c954dfaccd28f94e84ab9f94e12df76b4afb22331b1f0d3154893a6316"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20e7fb94e20b03dcc783f76c0865f9da39559dcc0c28dd1a3fce0d01902a6b9c"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bdab48575b6f870f465b397c38f1b415520e9879fdf10a53ee4f49dcbdf8a21"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9d3a9edcfbe77a3ed4bc72836d466dfce4174beb79eda79ea155cc77237ed9e8"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:44fc5c2b8fa871ce7f0023f619f1349a0aa03a0857f2c96fbc01c657dcbbdb49"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9c55460033867b4622cda1b6872edf445809535144152e5d14941ef591980edf"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2d11098a83cca92deaeaed3d58cfd150d49b3b06ee0d0852be466bf87596899e"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:691c807d94aecfbc76a14e1408847d59ff5b5906a04a23e12a89007672b9e819"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b81627b691f29c4c30a8f322546ad039c40c328373b11dff7490a3e1b517855"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_riscv64.whl", hash = 
"sha256:b637d6d941209e8d96a072d7977238eea128046effbf37d1d8b2c0764750017d"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:41360b01c140c2a03d346cec3280cf8a71aa07d94f3b1509fa0161c366af66b4"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:875039274f8a2361e5207857899706da840768e2a775bf8c65e82f60b197df02"}, ] -[package.dependencies] -ptyprocess = ">=0.5" - [[package]] -name = "pickleshare" -version = "0.7.5" -description = "Tiny 'shelve'-like database with concurrency support" +name = "pycparser" +version = "3.0" +description = "C parser in Python" optional = false -python-versions = "*" +python-versions = ">=3.10" +groups = ["main"] +markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" files = [ - {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, - {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, + {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"}, + {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, ] [[package]] -name = "pillow" -version = "10.3.0" -description = "Python Imaging Library (Fork)" +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ - {file = "pillow-10.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:90b9e29824800e90c84e4022dd5cc16eb2d9605ee13f05d47641eb183cd73d45"}, - {file = "pillow-10.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2c405445c79c3f5a124573a051062300936b0281fee57637e706453e452746c"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78618cdbccaa74d3f88d0ad6cb8ac3007f1a6fa5c6f19af64b55ca170bfa1edf"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261ddb7ca91fcf71757979534fb4c128448b5b4c55cb6152d280312062f69599"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ce49c67f4ea0609933d01c0731b34b8695a7a748d6c8d186f95e7d085d2fe475"}, - {file = "pillow-10.3.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b14f16f94cbc61215115b9b1236f9c18403c15dd3c52cf629072afa9d54c1cbf"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d33891be6df59d93df4d846640f0e46f1a807339f09e79a8040bc887bdcd7ed3"}, - {file = "pillow-10.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b50811d664d392f02f7761621303eba9d1b056fb1868c8cdf4231279645c25f5"}, - {file = "pillow-10.3.0-cp310-cp310-win32.whl", hash = "sha256:ca2870d5d10d8726a27396d3ca4cf7976cec0f3cb706debe88e3a5bd4610f7d2"}, - {file = "pillow-10.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:f0d0591a0aeaefdaf9a5e545e7485f89910c977087e7de2b6c388aec32011e9f"}, - {file = "pillow-10.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:ccce24b7ad89adb5a1e34a6ba96ac2530046763912806ad4c247356a8f33a67b"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:5f77cf66e96ae734717d341c145c5949c63180842a545c47a0ce7ae52ca83795"}, - {file = "pillow-10.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e4b878386c4bf293578b48fc570b84ecfe477d3b77ba39a6e87150af77f40c57"}, - {file = 
"pillow-10.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdcbb4068117dfd9ce0138d068ac512843c52295ed996ae6dd1faf537b6dbc27"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9797a6c8fe16f25749b371c02e2ade0efb51155e767a971c61734b1bf6293994"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9e91179a242bbc99be65e139e30690e081fe6cb91a8e77faf4c409653de39451"}, - {file = "pillow-10.3.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:1b87bd9d81d179bd8ab871603bd80d8645729939f90b71e62914e816a76fc6bd"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:81d09caa7b27ef4e61cb7d8fbf1714f5aec1c6b6c5270ee53504981e6e9121ad"}, - {file = "pillow-10.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:048ad577748b9fa4a99a0548c64f2cb8d672d5bf2e643a739ac8faff1164238c"}, - {file = "pillow-10.3.0-cp311-cp311-win32.whl", hash = "sha256:7161ec49ef0800947dc5570f86568a7bb36fa97dd09e9827dc02b718c5643f09"}, - {file = "pillow-10.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:8eb0908e954d093b02a543dc963984d6e99ad2b5e36503d8a0aaf040505f747d"}, - {file = "pillow-10.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e6f7d1c414191c1199f8996d3f2282b9ebea0945693fb67392c75a3a320941f"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:e46f38133e5a060d46bd630faa4d9fa0202377495df1f068a8299fd78c84de84"}, - {file = "pillow-10.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50b8eae8f7334ec826d6eeffaeeb00e36b5e24aa0b9df322c247539714c6df19"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d3bea1c75f8c53ee4d505c3e67d8c158ad4df0d83170605b50b64025917f338"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19aeb96d43902f0a783946a0a87dbdad5c84c936025b8419da0a0cd7724356b1"}, - {file = 
"pillow-10.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:74d28c17412d9caa1066f7a31df8403ec23d5268ba46cd0ad2c50fb82ae40462"}, - {file = "pillow-10.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:ff61bfd9253c3915e6d41c651d5f962da23eda633cf02262990094a18a55371a"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d886f5d353333b4771d21267c7ecc75b710f1a73d72d03ca06df49b09015a9ef"}, - {file = "pillow-10.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b5ec25d8b17217d635f8935dbc1b9aa5907962fae29dff220f2659487891cd3"}, - {file = "pillow-10.3.0-cp312-cp312-win32.whl", hash = "sha256:51243f1ed5161b9945011a7360e997729776f6e5d7005ba0c6879267d4c5139d"}, - {file = "pillow-10.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:412444afb8c4c7a6cc11a47dade32982439925537e483be7c0ae0cf96c4f6a0b"}, - {file = "pillow-10.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:798232c92e7665fe82ac085f9d8e8ca98826f8e27859d9a96b41d519ecd2e49a"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4eaa22f0d22b1a7e93ff0a596d57fdede2e550aecffb5a1ef1106aaece48e96b"}, - {file = "pillow-10.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cd5e14fbf22a87321b24c88669aad3a51ec052eb145315b3da3b7e3cc105b9a2"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1530e8f3a4b965eb6a7785cf17a426c779333eb62c9a7d1bbcf3ffd5bf77a4aa"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d512aafa1d32efa014fa041d38868fda85028e3f930a96f85d49c7d8ddc0383"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:339894035d0ede518b16073bdc2feef4c991ee991a29774b33e515f1d308e08d"}, - {file = "pillow-10.3.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:aa7e402ce11f0885305bfb6afb3434b3cd8f53b563ac065452d9d5654c7b86fd"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:0ea2a783a2bdf2a561808fe4a7a12e9aa3799b701ba305de596bc48b8bdfce9d"}, - {file = "pillow-10.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c78e1b00a87ce43bb37642c0812315b411e856a905d58d597750eb79802aaaa3"}, - {file = "pillow-10.3.0-cp38-cp38-win32.whl", hash = "sha256:72d622d262e463dfb7595202d229f5f3ab4b852289a1cd09650362db23b9eb0b"}, - {file = "pillow-10.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:2034f6759a722da3a3dbd91a81148cf884e91d1b747992ca288ab88c1de15999"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2ed854e716a89b1afcedea551cd85f2eb2a807613752ab997b9974aaa0d56936"}, - {file = "pillow-10.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc1a390a82755a8c26c9964d457d4c9cbec5405896cba94cf51f36ea0d855002"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4203efca580f0dd6f882ca211f923168548f7ba334c189e9eab1178ab840bf60"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3102045a10945173d38336f6e71a8dc71bcaeed55c3123ad4af82c52807b9375"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:6fb1b30043271ec92dc65f6d9f0b7a830c210b8a96423074b15c7bc999975f57"}, - {file = "pillow-10.3.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1dfc94946bc60ea375cc39cff0b8da6c7e5f8fcdc1d946beb8da5c216156ddd8"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b09b86b27a064c9624d0a6c54da01c1beaf5b6cadfa609cf63789b1d08a797b9"}, - {file = "pillow-10.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d3b2348a78bc939b4fed6552abfd2e7988e0f81443ef3911a4b8498ca084f6eb"}, - {file = "pillow-10.3.0-cp39-cp39-win32.whl", hash = "sha256:45ebc7b45406febf07fef35d856f0293a92e7417ae7933207e90bf9090b70572"}, - {file = "pillow-10.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:0ba26351b137ca4e0db0342d5d00d2e355eb29372c05afd544ebf47c0956ffeb"}, - {file = 
"pillow-10.3.0-cp39-cp39-win_arm64.whl", hash = "sha256:50fd3f6b26e3441ae07b7c979309638b72abc1a25da31a81a7fbd9495713ef4f"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:6b02471b72526ab8a18c39cb7967b72d194ec53c1fd0a70b050565a0f366d355"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8ab74c06ffdab957d7670c2a5a6e1a70181cd10b727cd788c4dd9005b6a8acd9"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:048eeade4c33fdf7e08da40ef402e748df113fd0b4584e32c4af74fe78baaeb2"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e2ec1e921fd07c7cda7962bad283acc2f2a9ccc1b971ee4b216b75fad6f0463"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c8e73e99da7db1b4cad7f8d682cf6abad7844da39834c288fbfa394a47bbced"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:16563993329b79513f59142a6b02055e10514c1a8e86dca8b48a893e33cf91e3"}, - {file = "pillow-10.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dd78700f5788ae180b5ee8902c6aea5a5726bac7c364b202b4b3e3ba2d293170"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:aff76a55a8aa8364d25400a210a65ff59d0168e0b4285ba6bf2bd83cf675ba32"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7bc2176354defba3edc2b9a777744462da2f8e921fbaf61e52acb95bafa9828"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:793b4e24db2e8742ca6423d3fde8396db336698c55cd34b660663ee9e45ed37f"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d93480005693d247f8346bc8ee28c72a2191bdf1f6b5db469c096c0c867ac015"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = 
"sha256:c83341b89884e2b2e55886e8fbbf37c3fa5efd6c8907124aeb72f285ae5696e5"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a1d1915db1a4fdb2754b9de292642a39a7fb28f1736699527bb649484fb966a"}, - {file = "pillow-10.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a0eaa93d054751ee9964afa21c06247779b90440ca41d184aeb5d410f20ff591"}, - {file = "pillow-10.3.0.tar.gz", hash = "sha256:9d2455fbf44c914840c793e89aa82d0e1763a14253a000743719ae5946814b2d"}, + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -fpx = ["olefile"] -mic = ["olefile"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] -typing = ["typing-extensions"] -xmp = ["defusedxml"] +windows-terminal = ["colorama (>=0.4.6)"] [[package]] -name = "pillow-heif" -version = "0.16.0" -description = "Python interface for libheif library" +name = "pymysql" +version = "1.1.2" +description = "Pure Python MySQL Driver" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pillow_heif-0.16.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:c7db96ac172e2654676986e8c35fa32bffdd5b429a8c86b9d628c0333c570d82"}, - {file = "pillow_heif-0.16.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a146be0c8e7bef204eeaa14799b2fca8a4a52ad972850975e23ef10cee4e7de7"}, - {file = "pillow_heif-0.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33e0b1549bcdfec363b3ba6fb55b3de882e1409b5b00f5a68a1a027f051e8ef2"}, - {file = "pillow_heif-0.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fea4410ce02e295079db5b2617579ba016671d334ac1888a1d4b34aedb56b866"}, - {file = "pillow_heif-0.16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:331579ce4f5fa079595c529b06810886ff76f8ade3eb411a1c9c90853a708022"}, - {file = "pillow_heif-0.16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:792e5d88b7d016fe48ae2fd77a852ec8dcf9a7fad1f7f191d35bc173896fe378"}, - {file = "pillow_heif-0.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e0492e4fd6d3334b9eed3651058216ef62f04afa099cfc6b05815c1bf0da2c38"}, - {file = "pillow_heif-0.16.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:beb6576cbe5a9404a8f2ad9ec68f6b0c406e5e9f5d5573722dc3244898dc9866"}, - {file = "pillow_heif-0.16.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:241cf6c510215c6df0ee948dfed06a20c099475250c5c6cac5e7a1ef9e0ec4c3"}, - {file = "pillow_heif-0.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28c980bf8d5239ee87986c9217a5954b07993d71d391949a9feafad0a9c5e9a7"}, - {file = "pillow_heif-0.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8856cf5f0d53f83d814ae5c8d34433e5e5ad9f3e328480257cd6e9fbdb4a458"}, - {file = "pillow_heif-0.16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fba5c46f84031f1186bdea2a0c95f82958f8c29321200e73d7ac5e79ee460c83"}, - {file = "pillow_heif-0.16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5c7f7a94fc2d08ddcf55a6834c4c55b7dea9605656c565ce11c82e3f6e0454a8"}, - {file = "pillow_heif-0.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a2681d4b62418813289987a9420059d724cd93542d0b05e0928fe4578517714"}, - {file = "pillow_heif-0.16.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:7e424d6a34b9466d054706393e76b5abdd84fabdc0c72b19ca10435a76140de7"}, - {file = "pillow_heif-0.16.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:be41b7fadd4a9355d24936f6fad83bb8130fe55ba228ec298ad316392bb6f38b"}, - {file = 
"pillow_heif-0.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:078bc74fd767625e465b2c107228f9c398b9a128bdf81b3f18812d7c07be660f"}, - {file = "pillow_heif-0.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f4293ecbb81d255d8d887dce4708a58e87c86e53c6f1b1affc4c3105e1bcb8c"}, - {file = "pillow_heif-0.16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f63a1d8f95811569df5df9b6b11674038929c2f696221f2a393aee5ac1e535b4"}, - {file = "pillow_heif-0.16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:89ec30420ddc843c43916febbe31697552ed123396a1696715eea75169866c07"}, - {file = "pillow_heif-0.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:d4595ec975db845d84ab90cbf0678f15b0068b8b83c01d1db7ea524e31bab4b4"}, - {file = "pillow_heif-0.16.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:1421d96aebdc9f5773213c8221ce547efb56e37a62da6698312edd4f281efb42"}, - {file = "pillow_heif-0.16.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:88ff22d2b162e7edd9cb9dd98de81455be04c40a99d1d3d3ebe1602b1a21c453"}, - {file = "pillow_heif-0.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb3efbe8efd26203589794988b11ea9bf3dea2d3bcf218e658f779d526dfcf80"}, - {file = "pillow_heif-0.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f062c1be6f04804ffdf0bc452142eff38d7544c8655c04291d16e3b996e4dc4"}, - {file = "pillow_heif-0.16.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:7fabd6534a38078a66ce8b7a5ae8ad37afd9863c930abd3031fb553f1ab4f01a"}, - {file = "pillow_heif-0.16.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d9e465d92cf01093e3e4c33776af97368add23ac1c8d0007f34b8d3e3390d6ad"}, - {file = "pillow_heif-0.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:502cebc90c11a6bffa2ea899088999c25fc99c8f322e047a266e541e3046b27c"}, - {file = "pillow_heif-0.16.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = 
"sha256:c2ad68e3e4be40adfc5290bf6daa1569dd7d18501e17779d217ce5cd8c1e338d"}, - {file = "pillow_heif-0.16.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:8e168d45b2ce63c1fe2334fd02927699b0097de72605f7571948010fd79e58f0"}, - {file = "pillow_heif-0.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bf10a1686c2d51f4db8ebb78825f96f28d18d1878599e1c64e88cfbdb70a3d2"}, - {file = "pillow_heif-0.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f15dc73ced02a0ccfac93159d12deeaecfbe4335883a1a3309df0f01c26e6e6"}, - {file = "pillow_heif-0.16.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2673048f3cf1498327add70f16e1129be2a09cf4a31cbc02363f5760eb5ba955"}, - {file = "pillow_heif-0.16.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9273af7224e0fb16c18637184a8ea9a8790105658daab04ad541982b8623e5c1"}, - {file = "pillow_heif-0.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:f613dfd05fd62a8b7b57649bfa5db1501be41e18b5e15dd4a2fc12d3e3ddfdaa"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3501f22985cbb427c76febf07a7e309cb828e485c0cf250a625733fc06fc1815"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-macosx_12_0_arm64.whl", hash = "sha256:2b7450303f08ec81d1a63a75052863bb687fc3be1fdd8a34d2c0fef627aacae5"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7794c1a8304eeb841d72cb73aa64cc60c9e5dccb2c7612f8caf528505f78581f"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5edd98192f74e4c7cffdd62953b2987e2b1e0d6a55d5c940306bed71f40206a"}, - {file = "pillow_heif-0.16.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:38fa2854ec7dbe6c875d64cc5b3cf5cc55f1c8a0248dc1c7c34e9d2505521b82"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b50160331754b603524e6ed33c386f478fd66fb345fa6433a507a01c8de642c6"}, - {file 
= "pillow_heif-0.16.0-pp38-pypy38_pp73-macosx_12_0_arm64.whl", hash = "sha256:9fd829c257a763e3a2e8418a773c2808c90799ee3e6b405b5399cb4fdfbe336e"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbd9cc527bbd53c3e7588e16aad170e11cfd180b7e9bd84f18fb020ddec11408"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a27abb523a07b17c118c09f1a00f92cde2295f8e997600024d4b57df3c5ba818"}, - {file = "pillow_heif-0.16.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0075adeb324adb07ddbfbe8a5c79ed12e5d04e60e9a642ff9427e71b5b0adccd"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:40014105688478d6ca146fc04bff6c13f445d01bdea79417b34ee50c1e559190"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-macosx_12_0_arm64.whl", hash = "sha256:7ef47297d526147923f4ecc7ff681a5d5f4e6e3300017681f59968652a0d8afb"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9923dfcc97ae9484d3514f2f6ec368e2ac97cd66f7b95359cc1b0ec0c1cd6157"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17963a73186961fe7792aef01c46e980635f3fcc1836393de39ec9c6776ca51e"}, - {file = "pillow_heif-0.16.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4b6caa5b13b4dfc180507527254014530f6bedbeabc1de2238918bf5b2700c7e"}, - {file = "pillow_heif-0.16.0.tar.gz", hash = "sha256:4d95004bb77aa640f80617716aa21bc092ec06307f6f2ad423deeeda07b4d29c"}, + {file = "pymysql-1.1.2-py3-none-any.whl", hash = "sha256:e6b1d89711dd51f8f74b1631fe08f039e7d76cf67a42a323d3178f0f25762ed9"}, + {file = "pymysql-1.1.2.tar.gz", hash = "sha256:4961d3e165614ae65014e361811a724e2044ad3ea3739de9903ae7c21f539f03"}, ] -[package.dependencies] -pillow = ">=9.5.0" - [package.extras] -dev = ["coverage", "defusedxml", "numpy", "opencv-python (==4.9.0.80)", "packaging", "pre-commit", 
"pylint", "pympler", "pytest"] -docs = ["sphinx (>=4.4)", "sphinx-issues (>=3.0.1)", "sphinx-rtd-theme (>=1.0)"] -tests = ["defusedxml", "numpy", "packaging", "pympler", "pytest"] -tests-min = ["defusedxml", "packaging", "pytest"] - -[[package]] -name = "platformdirs" -version = "3.11.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"}, - {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"}, -] - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] [[package]] -name = "prompt-toolkit" -version = "3.0.39" -description = "Library for building powerful interactive command lines in Python" +name = "pytest" +version = "9.0.2" +description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.10" +groups = ["dev"] files = [ - {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"}, - {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"}, + {file = "pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b"}, + {file = "pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11"}, ] [package.dependencies] -wcwidth = "*" - -[[package]] -name = "psutil" -version = "5.9.5" -description = "Cross-platform lib for 
process and system monitoring in Python." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.2" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" -files = [ - {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, - {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] - -[[package]] -name = "pygments" -version = "2.16.1" -description = "Pygments is a syntax highlighting package written in Python." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, - {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"}, -] - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pymysql" -version = "1.1.0" -description = "Pure Python MySQL Driver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "PyMySQL-1.1.0-py3-none-any.whl", hash = "sha256:8969ec6d763c856f7073c4c64662882675702efcb114b4bcbb955aea3a069fa7"}, - {file = "PyMySQL-1.1.0.tar.gz", hash = "sha256:4f13a7df8bf36a51e81dd9f3605fede45a4878fe02f9236349fd82a3f0612f96"}, -] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1.0.1" +packaging = ">=22" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" [package.extras] -ed25519 = ["PyNaCl (>=1.4.0)"] -rsa = ["cryptography"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] six = ">=1.5" [[package]] -name = "pywin32" -version = "306" -description = "Python for 
Window Extensions" +name = "python-dotenv" +version = "1.2.2" +description = "Read key-value pairs from a .env file and set them as environment variables" optional = false -python-versions = "*" +python-versions = ">=3.10" +groups = ["main"] files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = 
"pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a"}, + {file = "python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3"}, ] -[[package]] -name = "pyzmq" -version = "25.1.1" -description = "Python bindings for 0MQ" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:381469297409c5adf9a0e884c5eb5186ed33137badcbbb0560b86e910a2f1e76"}, - {file = "pyzmq-25.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:955215ed0604dac5b01907424dfa28b40f2b2292d6493445dd34d0dfa72586a8"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:985bbb1316192b98f32e25e7b9958088431d853ac63aca1d2c236f40afb17c83"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:afea96f64efa98df4da6958bae37f1cbea7932c35878b185e5982821bc883369"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76705c9325d72a81155bb6ab48d4312e0032bf045fb0754889133200f7a0d849"}, - {file = "pyzmq-25.1.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:77a41c26205d2353a4c94d02be51d6cbdf63c06fbc1295ea57dad7e2d3381b71"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:12720a53e61c3b99d87262294e2b375c915fea93c31fc2336898c26d7aed34cd"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:57459b68e5cd85b0be8184382cefd91959cafe79ae019e6b1ae6e2ba8a12cda7"}, - {file = "pyzmq-25.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:292fe3fc5ad4a75bc8df0dfaee7d0babe8b1f4ceb596437213821f761b4589f9"}, - {file = "pyzmq-25.1.1-cp310-cp310-win32.whl", hash = 
"sha256:35b5ab8c28978fbbb86ea54958cd89f5176ce747c1fb3d87356cf698048a7790"}, - {file = "pyzmq-25.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:11baebdd5fc5b475d484195e49bae2dc64b94a5208f7c89954e9e354fc609d8f"}, - {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:d20a0ddb3e989e8807d83225a27e5c2eb2260eaa851532086e9e0fa0d5287d83"}, - {file = "pyzmq-25.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e1c1be77bc5fb77d923850f82e55a928f8638f64a61f00ff18a67c7404faf008"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d89528b4943d27029a2818f847c10c2cecc79fa9590f3cb1860459a5be7933eb"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:90f26dc6d5f241ba358bef79be9ce06de58d477ca8485e3291675436d3827cf8"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2b92812bd214018e50b6380ea3ac0c8bb01ac07fcc14c5f86a5bb25e74026e9"}, - {file = "pyzmq-25.1.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:2f957ce63d13c28730f7fd6b72333814221c84ca2421298f66e5143f81c9f91f"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:047a640f5c9c6ade7b1cc6680a0e28c9dd5a0825135acbd3569cc96ea00b2505"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7f7e58effd14b641c5e4dec8c7dab02fb67a13df90329e61c869b9cc607ef752"}, - {file = "pyzmq-25.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c2910967e6ab16bf6fbeb1f771c89a7050947221ae12a5b0b60f3bca2ee19bca"}, - {file = "pyzmq-25.1.1-cp311-cp311-win32.whl", hash = "sha256:76c1c8efb3ca3a1818b837aea423ff8a07bbf7aafe9f2f6582b61a0458b1a329"}, - {file = "pyzmq-25.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:44e58a0554b21fc662f2712814a746635ed668d0fbc98b7cb9d74cb798d202e6"}, - {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_15_universal2.whl", hash = 
"sha256:e1ffa1c924e8c72778b9ccd386a7067cddf626884fd8277f503c48bb5f51c762"}, - {file = "pyzmq-25.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1af379b33ef33757224da93e9da62e6471cf4a66d10078cf32bae8127d3d0d4a"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cff084c6933680d1f8b2f3b4ff5bbb88538a4aac00d199ac13f49d0698727ecb"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2400a94f7dd9cb20cd012951a0cbf8249e3d554c63a9c0cdfd5cbb6c01d2dec"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d81f1ddae3858b8299d1da72dd7d19dd36aab654c19671aa8a7e7fb02f6638a"}, - {file = "pyzmq-25.1.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:255ca2b219f9e5a3a9ef3081512e1358bd4760ce77828e1028b818ff5610b87b"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a882ac0a351288dd18ecae3326b8a49d10c61a68b01419f3a0b9a306190baf69"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:724c292bb26365659fc434e9567b3f1adbdb5e8d640c936ed901f49e03e5d32e"}, - {file = "pyzmq-25.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ca1ed0bb2d850aa8471387882247c68f1e62a4af0ce9c8a1dbe0d2bf69e41fb"}, - {file = "pyzmq-25.1.1-cp312-cp312-win32.whl", hash = "sha256:b3451108ab861040754fa5208bca4a5496c65875710f76789a9ad27c801a0075"}, - {file = "pyzmq-25.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:eadbefd5e92ef8a345f0525b5cfd01cf4e4cc651a2cffb8f23c0dd184975d787"}, - {file = "pyzmq-25.1.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:db0b2af416ba735c6304c47f75d348f498b92952f5e3e8bff449336d2728795d"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7c133e93b405eb0d36fa430c94185bdd13c36204a8635470cccc200723c13bb"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:273bc3959bcbff3f48606b28229b4721716598d76b5aaea2b4a9d0ab454ec062"}, - {file = "pyzmq-25.1.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cbc8df5c6a88ba5ae385d8930da02201165408dde8d8322072e3e5ddd4f68e22"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:18d43df3f2302d836f2a56f17e5663e398416e9dd74b205b179065e61f1a6edf"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:73461eed88a88c866656e08f89299720a38cb4e9d34ae6bf5df6f71102570f2e"}, - {file = "pyzmq-25.1.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:34c850ce7976d19ebe7b9d4b9bb8c9dfc7aac336c0958e2651b88cbd46682123"}, - {file = "pyzmq-25.1.1-cp36-cp36m-win32.whl", hash = "sha256:d2045d6d9439a0078f2a34b57c7b18c4a6aef0bee37f22e4ec9f32456c852c71"}, - {file = "pyzmq-25.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:458dea649f2f02a0b244ae6aef8dc29325a2810aa26b07af8374dc2a9faf57e3"}, - {file = "pyzmq-25.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cff25c5b315e63b07a36f0c2bab32c58eafbe57d0dce61b614ef4c76058c115"}, - {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1579413ae492b05de5a6174574f8c44c2b9b122a42015c5292afa4be2507f28"}, - {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3d0a409d3b28607cc427aa5c30a6f1e4452cc44e311f843e05edb28ab5e36da0"}, - {file = "pyzmq-25.1.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21eb4e609a154a57c520e3d5bfa0d97e49b6872ea057b7c85257b11e78068222"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:034239843541ef7a1aee0c7b2cb7f6aafffb005ede965ae9cbd49d5ff4ff73cf"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f8115e303280ba09f3898194791a153862cbf9eef722ad8f7f741987ee2a97c7"}, - {file = "pyzmq-25.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:1a5d26fe8f32f137e784f768143728438877d69a586ddeaad898558dc971a5ae"}, - {file = "pyzmq-25.1.1-cp37-cp37m-win32.whl", hash = "sha256:f32260e556a983bc5c7ed588d04c942c9a8f9c2e99213fec11a031e316874c7e"}, - {file = "pyzmq-25.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf34e43c531bbb510ae7e8f5b2b1f2a8ab93219510e2b287a944432fad135f3"}, - {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:87e34f31ca8f168c56d6fbf99692cc8d3b445abb5bfd08c229ae992d7547a92a"}, - {file = "pyzmq-25.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c9c6c9b2c2f80747a98f34ef491c4d7b1a8d4853937bb1492774992a120f475d"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5619f3f5a4db5dbb572b095ea3cb5cc035335159d9da950830c9c4db2fbb6995"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5a34d2395073ef862b4032343cf0c32a712f3ab49d7ec4f42c9661e0294d106f"}, - {file = "pyzmq-25.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0e6b78220aba09815cd1f3a32b9c7cb3e02cb846d1cfc526b6595f6046618"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3669cf8ee3520c2f13b2e0351c41fea919852b220988d2049249db10046a7afb"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2d163a18819277e49911f7461567bda923461c50b19d169a062536fffe7cd9d2"}, - {file = "pyzmq-25.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:df27ffddff4190667d40de7beba4a950b5ce78fe28a7dcc41d6f8a700a80a3c0"}, - {file = "pyzmq-25.1.1-cp38-cp38-win32.whl", hash = "sha256:a382372898a07479bd34bda781008e4a954ed8750f17891e794521c3e21c2e1c"}, - {file = "pyzmq-25.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:52533489f28d62eb1258a965f2aba28a82aa747202c8fa5a1c7a43b5db0e85c1"}, - {file = "pyzmq-25.1.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:03b3f49b57264909aacd0741892f2aecf2f51fb053e7d8ac6767f6c700832f45"}, - {file = 
"pyzmq-25.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:330f9e188d0d89080cde66dc7470f57d1926ff2fb5576227f14d5be7ab30b9fa"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2ca57a5be0389f2a65e6d3bb2962a971688cbdd30b4c0bd188c99e39c234f414"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d457aed310f2670f59cc5b57dcfced452aeeed77f9da2b9763616bd57e4dbaae"}, - {file = "pyzmq-25.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c56d748ea50215abef7030c72b60dd723ed5b5c7e65e7bc2504e77843631c1a6"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8f03d3f0d01cb5a018debeb412441996a517b11c5c17ab2001aa0597c6d6882c"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:820c4a08195a681252f46926de10e29b6bbf3e17b30037bd4250d72dd3ddaab8"}, - {file = "pyzmq-25.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17ef5f01d25b67ca8f98120d5fa1d21efe9611604e8eb03a5147360f517dd1e2"}, - {file = "pyzmq-25.1.1-cp39-cp39-win32.whl", hash = "sha256:04ccbed567171579ec2cebb9c8a3e30801723c575601f9a990ab25bcac6b51e2"}, - {file = "pyzmq-25.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:e61f091c3ba0c3578411ef505992d356a812fb200643eab27f4f70eed34a29ef"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ade6d25bb29c4555d718ac6d1443a7386595528c33d6b133b258f65f963bb0f6"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0c95ddd4f6e9fca4e9e3afaa4f9df8552f0ba5d1004e89ef0a68e1f1f9807c7"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48e466162a24daf86f6b5ca72444d2bf39a5e58da5f96370078be67c67adc978"}, - {file = "pyzmq-25.1.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abc719161780932c4e11aaebb203be3d6acc6b38d2f26c0f523b5b59d2fc1996"}, - 
{file = "pyzmq-25.1.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ccf825981640b8c34ae54231b7ed00271822ea1c6d8ba1090ebd4943759abf5"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c2f20ce161ebdb0091a10c9ca0372e023ce24980d0e1f810f519da6f79c60800"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:deee9ca4727f53464daf089536e68b13e6104e84a37820a88b0a057b97bba2d2"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:aa8d6cdc8b8aa19ceb319aaa2b660cdaccc533ec477eeb1309e2a291eaacc43a"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019e59ef5c5256a2c7378f2fb8560fc2a9ff1d315755204295b2eab96b254d0a"}, - {file = "pyzmq-25.1.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b9af3757495c1ee3b5c4e945c1df7be95562277c6e5bccc20a39aec50f826cd0"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:548d6482dc8aadbe7e79d1b5806585c8120bafa1ef841167bc9090522b610fa6"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:057e824b2aae50accc0f9a0570998adc021b372478a921506fddd6c02e60308e"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2243700cc5548cff20963f0ca92d3e5e436394375ab8a354bbea2b12911b20b0"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79986f3b4af059777111409ee517da24a529bdbd46da578b33f25580adcff728"}, - {file = "pyzmq-25.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:11d58723d44d6ed4dd677c5615b2ffb19d5c426636345567d6af82be4dff8a55"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:49d238cf4b69652257db66d0c623cd3e09b5d2e9576b56bc067a396133a00d4a"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fedbdc753827cf014c01dbbee9c3be17e5a208dcd1bf8641ce2cd29580d1f0d4"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc16ac425cc927d0a57d242589f87ee093884ea4804c05a13834d07c20db203c"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11c1d2aed9079c6b0c9550a7257a836b4a637feb334904610f06d70eb44c56d2"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e8a701123029cc240cea61dd2d16ad57cab4691804143ce80ecd9286b464d180"}, - {file = "pyzmq-25.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:61706a6b6c24bdece85ff177fec393545a3191eeda35b07aaa1458a027ad1304"}, - {file = "pyzmq-25.1.1.tar.gz", hash = "sha256:259c22485b71abacdfa8bf79720cd7bcf4b9d128b30ea554f01ae71fdbfdaa23"}, -] - -[package.dependencies] -cffi = {version = "*", markers = "implementation_name == \"pypy\""} +[package.extras] +cli = ["click (>=5.0)"] [[package]] name = "requests" -version = "2.31.0" +version = "2.33.0" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.10" +groups = ["main"] files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.33.0-py3-none-any.whl", hash = "sha256:3324635456fa185245e24865e810cecec7b4caf933d7eb133dcde67d48cee69b"}, + {file = "requests-2.33.0.tar.gz", hash = "sha256:c7ebc5e8b0f21837386ad0e1c8fe8b829fa5f544d8df3b2253bff14ef29d7652"}, ] [package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +certifi = ">=2023.5.7" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" +urllib3 = ">=1.26,<3" [package.extras] socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +test = ["PySocks (>=1.5.6,!=1.5.7)", "pytest (>=3)", "pytest-cov", "pytest-httpbin (==2.1.0)", "pytest-mock", "pytest-xdist"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<8)"] [[package]] name = "s3transfer" -version = "0.7.0" +version = "0.16.0" description = "An Amazon S3 Transfer Manager" optional = false -python-versions = ">= 3.7" +python-versions = ">=3.9" +groups = ["dev"] files = [ - {file = "s3transfer-0.7.0-py3-none-any.whl", hash = "sha256:10d6923c6359175f264811ef4bf6161a3156ce8e350e705396a7557d6293c33a"}, - {file = "s3transfer-0.7.0.tar.gz", hash = "sha256:fd3889a66f5fe17299fe75b82eae6cf722554edca744ca5d5fe308b104883d2e"}, + {file = "s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe"}, + {file = "s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920"}, ] [package.dependencies] -botocore = ">=1.12.36,<2.0a.0" +botocore = ">=1.37.4,<2.0a.0" [package.extras] -crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] +crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] [[package]] 
name = "six" -version = "1.16.0" +version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] name = "slack-bolt" -version = "1.18.0" +version = "1.27.0" description = "The Bolt Framework for Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "slack_bolt-1.18.0-py2.py3-none-any.whl", hash = "sha256:63089a401ae3900c37698890249acd008a4651d06e86194edc7b72a00819bbac"}, - {file = "slack_bolt-1.18.0.tar.gz", hash = "sha256:43b121acf78440303ce5129e53be36bdfe5d926a193daef7daf2860688e65dd3"}, + {file = "slack_bolt-1.27.0-py2.py3-none-any.whl", hash = "sha256:c43c94bf34740f2adeb9b55566c83f1e73fed6ba2878bd346cdfd6fd8ad22360"}, + {file = "slack_bolt-1.27.0.tar.gz", hash = "sha256:3db91d64e277e176a565c574ae82748aa8554f19e41a4fceadca4d65374ce1e0"}, ] [package.dependencies] -slack-sdk = ">=3.21.2,<4" - -[package.extras] -adapter = ["CherryPy (>=18,<19)", "Django (>=3,<5)", "Flask (>=1,<3)", "Werkzeug (>=2,<3)", "boto3 (<=2)", "bottle (>=0.12,<1)", "chalice (>=1.28,<2)", "falcon (>=2,<4)", "fastapi (>=0.70.0,<1)", "gunicorn (>=20,<21)", "pyramid (>=1,<3)", "sanic (>=22,<23)", "starlette (>=0.14,<1)", "tornado (>=6,<7)", "uvicorn (<1)", "websocket-client (>=1.2.3,<2)"] -adapter-testing = ["Flask (>=1,<2)", "Werkzeug (>=1,<2)", "boddle (>=0.2,<0.3)", "docker 
(>=5,<6)", "moto (>=3,<4)", "requests (>=2,<3)", "sanic-testing (>=0.7)"] -async = ["aiohttp (>=3,<4)", "websockets (>=10,<11)"] -testing = ["Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (>=1,<2)", "aiohttp (>=3,<4)", "black (==22.8.0)", "click (<=8.0.4)", "itsdangerous (==2.0.1)", "pytest (>=6.2.5,<7)", "pytest-asyncio (>=0.18.2,<1)", "pytest-cov (>=3,<4)"] -testing-without-asyncio = ["Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (>=1,<2)", "black (==22.8.0)", "click (<=8.0.4)", "itsdangerous (==2.0.1)", "pytest (>=6.2.5,<7)", "pytest-cov (>=3,<4)"] +slack_sdk = ">=3.38.0,<4" [[package]] name = "slack-sdk" -version = "3.23.0" +version = "3.41.0" description = "The Slack API Platform SDK for Python" optional = false -python-versions = ">=3.6.0" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "slack_sdk-3.23.0-py2.py3-none-any.whl", hash = "sha256:2a8513505cced20ceee22b5b49c11d9545caa6234b56bf0ad47133ea5b357d10"}, - {file = "slack_sdk-3.23.0.tar.gz", hash = "sha256:9d6ebc4ff74e7983e1b27dbdb0f2bb6fc3c2a2451694686eaa2be23bbb085a73"}, + {file = "slack_sdk-3.41.0-py2.py3-none-any.whl", hash = "sha256:bb18dcdfff1413ec448e759cf807ec3324090993d8ab9111c74081623b692a89"}, + {file = "slack_sdk-3.41.0.tar.gz", hash = "sha256:eb61eb12a65bebeca9cb5d36b3f799e836ed2be21b456d15df2627cfe34076ca"}, ] [package.extras] -optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=10,<11)"] -testing = ["Flask (>=1,<2)", "Flask-Sockets (>=0.2,<1)", "Jinja2 (==3.0.3)", "Werkzeug (<2)", "black (==22.8.0)", "boto3 (<=2)", "click (==8.0.4)", "flake8 (>=5,<6)", "itsdangerous (==1.1.0)", "moto (>=3,<4)", "psutil (>=5,<6)", "pytest (>=6.2.5,<7)", "pytest-asyncio (<1)", "pytest-cov (>=2,<3)"] +optional = ["SQLAlchemy (>=1.4,<3)", "aiodns (>1.0)", "aiohttp (>=3.7.3,<4)", "boto3 (<=2)", "websocket-client (>=1,<2)", "websockets (>=9.1,<16)"] [[package]] name = "sqlalchemy" -version 
= "1.4.49" +version = "2.0.48" description = "Database Abstraction Library" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.7" +groups = ["main"] files = [ - {file = "SQLAlchemy-1.4.49-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e126cf98b7fd38f1e33c64484406b78e937b1a280e078ef558b95bf5b6895f6"}, - {file = "SQLAlchemy-1.4.49-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03db81b89fe7ef3857b4a00b63dedd632d6183d4ea5a31c5d8a92e000a41fc71"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:95b9df9afd680b7a3b13b38adf6e3a38995da5e162cc7524ef08e3be4e5ed3e1"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a63e43bf3f668c11bb0444ce6e809c1227b8f067ca1068898f3008a273f52b09"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f835c050ebaa4e48b18403bed2c0fda986525896efd76c245bdd4db995e51a4c"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c21b172dfb22e0db303ff6419451f0cac891d2e911bb9fbf8003d717f1bcf91"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-win32.whl", hash = "sha256:5fb1ebdfc8373b5a291485757bd6431de8d7ed42c27439f543c81f6c8febd729"}, - {file = "SQLAlchemy-1.4.49-cp310-cp310-win_amd64.whl", hash = "sha256:f8a65990c9c490f4651b5c02abccc9f113a7f56fa482031ac8cb88b70bc8ccaa"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8923dfdf24d5aa8a3adb59723f54118dd4fe62cf59ed0d0d65d940579c1170a4"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9ab2c507a7a439f13ca4499db6d3f50423d1d65dc9b5ed897e70941d9e135b0"}, - {file = 
"SQLAlchemy-1.4.49-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5debe7d49b8acf1f3035317e63d9ec8d5e4d904c6e75a2a9246a119f5f2fdf3d"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-win32.whl", hash = "sha256:82b08e82da3756765c2e75f327b9bf6b0f043c9c3925fb95fb51e1567fa4ee87"}, - {file = "SQLAlchemy-1.4.49-cp311-cp311-win_amd64.whl", hash = "sha256:171e04eeb5d1c0d96a544caf982621a1711d078dbc5c96f11d6469169bd003f1"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:36e58f8c4fe43984384e3fbe6341ac99b6b4e083de2fe838f0fdb91cebe9e9cb"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b31e67ff419013f99ad6f8fc73ee19ea31585e1e9fe773744c0f3ce58c039c30"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c14b29d9e1529f99efd550cd04dbb6db6ba5d690abb96d52de2bff4ed518bc95"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c40f3470e084d31247aea228aa1c39bbc0904c2b9ccbf5d3cfa2ea2dac06f26d"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-win32.whl", hash = "sha256:706bfa02157b97c136547c406f263e4c6274a7b061b3eb9742915dd774bbc264"}, - {file = "SQLAlchemy-1.4.49-cp36-cp36m-win_amd64.whl", hash = "sha256:a7f7b5c07ae5c0cfd24c2db86071fb2a3d947da7bd487e359cc91e67ac1c6d2e"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:4afbbf5ef41ac18e02c8dc1f86c04b22b7a2125f2a030e25bbb4aff31abb224b"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24e300c0c2147484a002b175f4e1361f102e82c345bf263242f0449672a4bccf"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:201de072b818f8ad55c80d18d1a788729cccf9be6d9dc3b9d8613b053cd4836d"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7653ed6817c710d0c95558232aba799307d14ae084cc9b1f4c389157ec50df5c"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-win32.whl", hash = "sha256:647e0b309cb4512b1f1b78471fdaf72921b6fa6e750b9f891e09c6e2f0e5326f"}, - {file = "SQLAlchemy-1.4.49-cp37-cp37m-win_amd64.whl", hash = "sha256:ab73ed1a05ff539afc4a7f8cf371764cdf79768ecb7d2ec691e3ff89abbc541e"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:37ce517c011560d68f1ffb28af65d7e06f873f191eb3a73af5671e9c3fada08a"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1878ce508edea4a879015ab5215546c444233881301e97ca16fe251e89f1c55"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0e8e608983e6f85d0852ca61f97e521b62e67969e6e640fe6c6b575d4db68557"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccf956da45290df6e809ea12c54c02ace7f8ff4d765d6d3dfb3655ee876ce58d"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-win32.whl", hash = "sha256:f167c8175ab908ce48bd6550679cc6ea20ae169379e73c7720a28f89e53aa532"}, - {file = "SQLAlchemy-1.4.49-cp38-cp38-win_amd64.whl", hash = "sha256:45806315aae81a0c202752558f0df52b42d11dd7ba0097bf71e253b4215f34f4"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:b6d0c4b15d65087738a6e22e0ff461b407533ff65a73b818089efc8eb2b3e1de"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a843e34abfd4c797018fd8d00ffffa99fd5184c421f190b6ca99def4087689bd"}, - {file = 
"SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1c890421651b45a681181301b3497e4d57c0d01dc001e10438a40e9a9c25ee77"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d26f280b8f0a8f497bc10573849ad6dc62e671d2468826e5c748d04ed9e670d5"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-win32.whl", hash = "sha256:ec2268de67f73b43320383947e74700e95c6770d0c68c4e615e9897e46296294"}, - {file = "SQLAlchemy-1.4.49-cp39-cp39-win_amd64.whl", hash = "sha256:bbdf16372859b8ed3f4d05f925a984771cd2abd18bd187042f24be4886c2a15f"}, - {file = "SQLAlchemy-1.4.49.tar.gz", hash = "sha256:06ff25cbae30c396c4b7737464f2a7fc37a67b7da409993b182b024cec80aed9"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7001dc9d5f6bb4deb756d5928eaefe1930f6f4179da3924cbd95ee0e9f4dce89"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1a89ce07ad2d4b8cfc30bd5889ec40613e028ed80ef47da7d9dd2ce969ad30e0"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:10853a53a4a00417a00913d270dddda75815fcb80675874285f41051c094d7dd"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fac0fa4e4f55f118fd87177dacb1c6522fe39c28d498d259014020fec9164c29"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3713e21ea67bca727eecd4a24bf68bcd414c403faae4989442be60994301ded0"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-win32.whl", hash = "sha256:d404dc897ce10e565d647795861762aa2d06ca3f4a728c5e9a835096c7059018"}, + {file = "sqlalchemy-2.0.48-cp310-cp310-win_amd64.whl", hash = "sha256:841a94c66577661c1f088ac958cd767d7c9bf507698f45afffe7a4017049de76"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b4c575df7368b3b13e0cebf01d4679f9a28ed2ae6c1cd0b1d5beffb6b2007dc"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e83e3f959aaa1c9df95c22c528096d94848a1bc819f5d0ebf7ee3df0ca63db6c"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f7b7243850edd0b8b97043f04748f31de50cf426e939def5c16bedb540698f7"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:82745b03b4043e04600a6b665cb98697c4339b24e34d74b0a2ac0a2488b6f94d"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5e088bf43f6ee6fec7dbf1ef7ff7774a616c236b5c0cb3e00662dd71a56b571"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-win32.whl", hash = "sha256:9c7d0a77e36b5f4b01ca398482230ab792061d243d715299b44a0b55c89fe617"}, + {file = "sqlalchemy-2.0.48-cp311-cp311-win_amd64.whl", hash = "sha256:583849c743e0e3c9bb7446f5b5addeacedc168d657a69b418063dfdb2d90081c"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:348174f228b99f33ca1f773e85510e08927620caa59ffe7803b37170df30332b"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53667b5f668991e279d21f94ccfa6e45b4e3f4500e7591ae59a8012d0f010dcb"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34634e196f620c7a61d18d5cf7dc841ca6daa7961aed75d532b7e58b309ac894"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:546572a1793cc35857a2ffa1fe0e58571af1779bcc1ffa7c9fb0839885ed69a9"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:07edba08061bc277bfdc772dd2a1a43978f5a45994dd3ede26391b405c15221e"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-win32.whl", hash = 
"sha256:908a3fa6908716f803b86896a09a2c4dde5f5ce2bb07aacc71ffebb57986ce99"}, + {file = "sqlalchemy-2.0.48-cp312-cp312-win_amd64.whl", hash = "sha256:68549c403f79a8e25984376480959975212a670405e3913830614432b5daa07a"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e3070c03701037aa418b55d36532ecb8f8446ed0135acb71c678dbdf12f5b6e4"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2645b7d8a738763b664a12a1542c89c940daa55196e8d73e55b169cc5c99f65f"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b19151e76620a412c2ac1c6f977ab1b9fa7ad43140178345136456d5265b32ed"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b193a7e29fd9fa56e502920dca47dffe60f97c863494946bd698c6058a55658"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:36ac4ddc3d33e852da9cb00ffb08cea62ca05c39711dc67062ca2bb1fae35fd8"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-win32.whl", hash = "sha256:389b984139278f97757ea9b08993e7b9d1142912e046ab7d82b3fbaeb0209131"}, + {file = "sqlalchemy-2.0.48-cp313-cp313-win_amd64.whl", hash = "sha256:d612c976cbc2d17edfcc4c006874b764e85e990c29ce9bd411f926bbfb02b9a2"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69f5bc24904d3bc3640961cddd2523e361257ef68585d6e364166dfbe8c78fae"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd08b90d211c086181caed76931ecfa2bdfc83eea3cfccdb0f82abc6c4b876cb"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1ccd42229aaac2df431562117ac7e667d702e8e44afdb6cf0e50fa3f18160f0b"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:f0dcbc588cd5b725162c076eb9119342f6579c7f7f55057bb7e3c6ff27e13121"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-win32.whl", hash = "sha256:9764014ef5e58aab76220c5664abb5d47d5bc858d9debf821e55cfdd0f128485"}, + {file = "sqlalchemy-2.0.48-cp313-cp313t-win_amd64.whl", hash = "sha256:e2f35b4cccd9ed286ad62e0a3c3ac21e06c02abc60e20aa51a3e305a30f5fa79"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e2d0d88686e3d35a76f3e15a34e8c12d73fc94c1dea1cd55782e695cc14086dd"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:49b7bddc1eebf011ea5ab722fdbe67a401caa34a350d278cc7733c0e88fecb1f"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:426c5ca86415d9b8945c7073597e10de9644802e2ff502b8e1f11a7a2642856b"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:288937433bd44e3990e7da2402fabc44a3c6c25d3704da066b85b89a85474ae0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8183dc57ae7d9edc1346e007e840a9f3d6aa7b7f165203a99e16f447150140d2"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-win32.whl", hash = "sha256:1182437cb2d97988cfea04cf6cdc0b0bb9c74f4d56ec3d08b81e23d621a28cc6"}, + {file = "sqlalchemy-2.0.48-cp314-cp314-win_amd64.whl", hash = "sha256:144921da96c08feb9e2b052c5c5c1d0d151a292c6135623c6b2c041f2a45f9e0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5aee45fd2c6c0f2b9cdddf48c48535e7471e42d6fb81adfde801da0bd5b93241"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7cddca31edf8b0653090cbb54562ca027c421c58ddde2c0685f49ff56a1690e0"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:7a936f1bb23d370b7c8cc079d5fce4c7d18da87a33c6744e51a93b0f9e97e9b3"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e004aa9248e8cb0a5f9b96d003ca7c1c0a5da8decd1066e7b53f59eb8ce7c62b"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-win32.whl", hash = "sha256:b8438ec5594980d405251451c5b7ea9aa58dda38eb7ac35fb7e4c696712ee24f"}, + {file = "sqlalchemy-2.0.48-cp314-cp314t-win_amd64.whl", hash = "sha256:d854b3970067297f3a7fbd7a4683587134aa9b3877ee15aa29eea478dc68f933"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8649a14caa5f8a243628b1d61cf530ad9ae4578814ba726816adb1121fc493e"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6bb85c546591569558571aa1b06aba711b26ae62f111e15e56136d69920e1616"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6b764fb312bd35e47797ad2e63f0d323792837a6ac785a4ca967019357d2bc7"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:7c998f2ace8bf76b453b75dbcca500d4f4b9dd3908c13e89b86289b37784848b"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d64177f443594c8697369c10e4bbcac70ef558e0f7921a1de7e4a3d1734bcf67"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-win32.whl", hash = "sha256:01f6bbd4308b23240cf7d3ef117557c8fd097ec9549d5d8a52977544e35b40ad"}, + {file = "sqlalchemy-2.0.48-cp38-cp38-win_amd64.whl", hash = "sha256:858e433f12b0e5b3ed2f8da917433b634f4937d0e8793e5cb33c54a1a01df565"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4599a95f9430ae0de82b52ff0d27304fe898c17cb5f4099f7438a51b9998ac77"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f27f9da0a7d22b9f981108fd4b62f8b5743423388915a563e651c20d06c1f457"}, + {file = 
"sqlalchemy-2.0.48-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d8fcccbbc0c13c13702c471da398b8cd72ba740dca5859f148ae8e0e8e0d3e7e"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a5b429eb84339f9f05e06083f119ad814e6d85e27ecbdf9c551dfdbb128eaf8a"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:bcb8ebbf2e2c36cfe01a94f2438012c6a9d494cf80f129d9753bcdf33bfc35a6"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-win32.whl", hash = "sha256:e214d546c8ecb5fc22d6e6011746082abf13a9cf46eefb45769c7b31407c97b5"}, + {file = "sqlalchemy-2.0.48-cp39-cp39-win_amd64.whl", hash = "sha256:b8fc3454b4f3bd0a368001d0e968852dad45a873f8b4babd41bc302ec851a099"}, + {file = "sqlalchemy-2.0.48-py3-none-any.whl", hash = "sha256:a66fe406437dd65cacd96a72689a3aaaecaebbcd62d81c5ac1c0fdbeac835096"}, + {file = "sqlalchemy-2.0.48.tar.gz", hash = "sha256:5ca74f37f3369b45e1f6b7b06afb182af1fd5dde009e4ffd831830d98cbe5fe7"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" [package.extras] -aiomysql = ["aiomysql", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet 
(>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] mssql-pymssql = ["pymssql"] mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"] -mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql", "pymysql (<1)"] -sqlcipher = ["sqlcipher3-binary"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] [[package]] -name = "stack-data" -version = "0.6.3" -description = "Extract data from python stack frames and tracebacks for informative displays" +name = "typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" optional = false -python-versions = "*" -files = [ - {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, - {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", 
"pytest", "typeguard"] - -[[package]] -name = "tornado" -version = "6.3.3" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -optional = false -python-versions = ">= 3.8" -files = [ - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d"}, - {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a"}, - {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16"}, - {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17"}, - {file = "tornado-6.3.3-cp38-abi3-win32.whl", hash = "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3"}, - {file = "tornado-6.3.3-cp38-abi3-win_amd64.whl", hash = "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5"}, - {file = "tornado-6.3.3.tar.gz", hash = "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"}, -] - -[[package]] -name = "traitlets" -version = "5.11.2" 
-description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "traitlets-5.11.2-py3-none-any.whl", hash = "sha256:98277f247f18b2c5cabaf4af369187754f4fb0e85911d473f72329db8a7f4fae"}, - {file = "traitlets-5.11.2.tar.gz", hash = "sha256:7564b5bf8d38c40fa45498072bf4dc5e8346eb087bbf1e2ae2d8774f6a0f078e"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.5.1)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] - [[package]] name = "urllib3" -version = "1.26.17" +version = "2.6.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.9" +groups = ["main", "dev"] files = [ - {file = "urllib3-1.26.17-py2.py3-none-any.whl", hash = "sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b"}, - {file = "urllib3-1.26.17.tar.gz", hash = "sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21"}, + {file = "urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4"}, + {file = "urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed"}, ] [package.extras] -brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "wcwidth" -version = "0.2.8" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -files = [ - {file = "wcwidth-0.2.8-py2.py3-none-any.whl", hash = "sha256:77f719e01648ed600dfa5402c347481c0992263b81a027344f3e1ba25493a704"}, - {file = "wcwidth-0.2.8.tar.gz", hash = "sha256:8705c569999ffbb4f6a87c6d1b80f324bd6db952f5eb0b95bc07517f4c1813d4"}, -] +brotli = ["brotli (>=1.2.0) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=1.2.0.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [metadata] -lock-version = "2.0" -python-versions = "^3.11" -content-hash = "0dd2e974c02e443a64a6ff94746abb6fc011eb81d6a62dfa22b03d7bff4f9cd1" +lock-version = "2.1" +python-versions = "^3.12" +content-hash = "1a838a06c2d452cf5cd57fa44cb4c195fc2fa1e0c75bce56b8b53312b2366ebb" diff --git a/pyproject.toml b/pyproject.toml index 
b7a407a..aef914d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,23 +1,65 @@ [tool.poetry] name = "syncbot" -version = "0.1.0" -description = "" -authors = ["Evan Petzoldt "] +version = "1.0.1" +description = "Sync chat threads between Slack Workspaces." +authors = ["Evan Petzoldt ", "Klint Van Tassel "] readme = "README.md" +[tool.poetry.requires-plugins] +poetry-plugin-export = ">=1.8" + [tool.poetry.dependencies] -python = "^3.11" -slack-bolt = "^1.18.0" -sqlalchemy = "<2.0" -pymysql = "^1.1.0" -cryptography = "^41.0.4" -requests = "^2.31.0" -pillow-heif = "^0.16.0" +python = "^3.12" +alembic = "^1.13" +python-dotenv = "^1.2.0" +slack-bolt = "^1.27.0" +sqlalchemy = "^2.0" +pymysql = "^1.1.2" +psycopg2-binary = "^2.9" +cryptography = "^46.0.0" +requests = "^2.32.0" [tool.poetry.group.dev.dependencies] -ipykernel = "^6.25.2" boto3 = "^1.28.57" +pytest = "^9.0" + +[tool.pytest.ini_options] +testpaths = ["tests", "infra/aws/tests", "infra/gcp/tests"] +pythonpath = ["syncbot", "infra/aws/db_setup"] + +[tool.ruff] +target-version = "py312" +line-length = 120 +src = ["syncbot", "tests", "infra/aws/tests", "infra/gcp/tests"] + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "UP", # pyupgrade + "B", # flake8-bugbear + "SIM", # flake8-simplify + "LOG", # flake8-logging +] +ignore = [ + "E501", # line-too-long (handled by formatter) + "B905", # zip-without-explicit-strict + "SIM108", # ternary operator (readability preference) +] + +[tool.ruff.lint.per-file-ignores] +"syncbot/app.py" = ["E402"] # load_dotenv() must run before app imports +"syncbot/builders/user_mapping.py" = ["I001"] # import block sort (slack.blocks shorthand used) + +[tool.ruff.lint.isort] +known-first-party = ["builders", "constants", "db", "federation", "handlers", "helpers", "logger", "routing", "slack"] + +[tool.ruff.format] +quote-style = "double" +indent-style = "space" [build-system] requires = ["poetry-core"] diff 
--git a/samconfig.toml b/samconfig.toml new file mode 100644 index 0000000..97fc6e7 --- /dev/null +++ b/samconfig.toml @@ -0,0 +1,43 @@ +# SAM CLI configuration for local deployment. +# +# Full list of CloudFormation parameters is in infra/aws/template.yaml (Parameters). +# Interactive deploy (infra/aws/scripts/deploy.sh) passes overrides on the CLI. +# This file is for quick sam build / sam deploy with minimal Stage-only overrides. +# +# Usage: +# sam build --config-env test +# sam deploy --config-env test +# sam deploy --config-env prod +# +version = 0.1 + +[default.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[test.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +# test: minimal Stage; add other template parameters via --parameter-overrides or guided deploy. +[test.deploy.parameters] +stack_name = "syncbot-test" +resolve_s3 = true +s3_prefix = "syncbot-test" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=test" + +[prod.build.parameters] +template_file = "infra/aws/template.yaml" +use_container = true + +[prod.deploy.parameters] +stack_name = "syncbot-prod" +resolve_s3 = true +s3_prefix = "syncbot-prod" +region = "us-east-2" +capabilities = "CAPABILITY_IAM" +confirm_changeset = true +parameter_overrides = "Stage=prod" diff --git a/slack-manifest.json b/slack-manifest.json new file mode 100644 index 0000000..1b24964 --- /dev/null +++ b/slack-manifest.json @@ -0,0 +1,88 @@ +{ + "_metadata": { + "major_version": 1, + "minor_version": 1 + }, + "display_information": { + "name": "SyncBot", + "description": "Sync chat threads between Slack Workspaces.", + "background_color": "#1A1D21" + }, + "features": { + "app_home": { + "home_tab_enabled": true, + "messages_tab_enabled": true, + "messages_tab_read_only_enabled": true + }, + "bot_user": { + "display_name": "SyncBot", + "always_online": true + } + }, + "oauth_config": { + 
"redirect_urls": [ + "https://your-random-tunnel-name.trycloudflare.com/slack/oauth_redirect" + ], + "scopes": { + "bot": [ + "app_mentions:read", + "channels:history", + "channels:join", + "channels:read", + "channels:manage", + "chat:write", + "chat:write.customize", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email" + ], + "user": [ + "chat:write", + "channels:history", + "channels:read", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email" + ] + } + }, + "settings": { + "event_subscriptions": { + "request_url": "https://your-random-tunnel-name.trycloudflare.com/slack/events", + "bot_events": [ + "app_home_opened", + "member_joined_channel", + "message.channels", + "message.groups", + "reaction_added", + "reaction_removed", + "team_join", + "tokens_revoked", + "user_profile_changed" + ] + }, + "interactivity": { + "is_enabled": true, + "request_url": "https://your-random-tunnel-name.trycloudflare.com/slack/events" + }, + "org_deploy_enabled": false, + "socket_mode_enabled": false, + "token_rotation_enabled": false + } +} diff --git a/syncbot/app.py b/syncbot/app.py index 1d34aa5..b0d234a 100644 --- a/syncbot/app.py +++ b/syncbot/app.py @@ -1,42 +1,241 @@ +"""SyncBot — Slack app that syncs messages across workspaces. + +This module is the entry point for both AWS Lambda (via :func:`handler`) and +container/local HTTP mode (``python app.py`` / Cloud Run: listens on :envvar:`PORT` +or port 3000 by default). + +All incoming Slack events, actions, view submissions, and slash commands are +dispatched through :func:`main_response`. In production (non-local), view +submissions first run :func:`view_ack` for the HTTP response, then :func:`main_response` +for the work phase (lazy). 
Handlers are looked up in :data:`routing.MAIN_MAPPER` +and :data:`routing.VIEW_ACK_MAPPER`. + +Federation API endpoints (``/api/federation/*``) handle cross-instance +communication and are dispatched separately from Slack events. +""" + import json import logging +import os import re -import sys +from importlib.metadata import PackageNotFoundError, version + +from dotenv import load_dotenv + +try: + __version__ = version("syncbot") +except PackageNotFoundError: + __version__ = "dev" + +# Load .env before any other app imports so env vars are available everywhere. +# In production (Lambda) there is no .env file and this is a harmless no-op. +load_dotenv(os.path.join(os.path.dirname(__file__), "..", ".env")) + +from http.server import BaseHTTPRequestHandler, HTTPServer from slack_bolt import App from slack_bolt.adapter.aws_lambda import SlackRequestHandler -from utils.constants import LOCAL_DEVELOPMENT -from utils.helpers import get_oauth_flow, get_request_type, safe_get -from utils.routing import MAIN_MAPPER +from slack_bolt.request import BoltRequest +from slack_bolt.response import BoltResponse +from slack_bolt.util.utils import get_boot_message + +from constants import ( + FEDERATION_ENABLED, + HAS_REAL_BOT_TOKEN, + LOCAL_DEVELOPMENT, + validate_config, +) +from db import initialize_database +from federation.api import dispatch_federation_request +from helpers import get_oauth_flow, get_request_type, safe_get +from logger import ( + configure_logging, + emit_metric, + get_request_duration_ms, + set_correlation_id, +) +from routing import MAIN_MAPPER, VIEW_ACK_MAPPER, VIEW_MAPPER + +_SENSITIVE_KEYS = frozenset({ + "token", "bot_token", "access_token", "shared_secret", + "public_key", "private_key", "private_key_encrypted", +}) + + +def _redact_sensitive(obj, _depth=0): + """Return a copy of *obj* with sensitive keys replaced by ``"[REDACTED]"``.""" + if _depth > 10: + return obj + if isinstance(obj, dict): + return { + k: "[REDACTED]" if k in _SENSITIVE_KEYS else 
_redact_sensitive(v, _depth + 1) + for k, v in obj.items() + } + if isinstance(obj, list): + return [_redact_sensitive(v, _depth + 1) for v in obj] + return obj + SlackRequestHandler.clear_all_log_handlers() -if logging.getLogger().hasHandlers(): - logging.getLogger().setLevel(logging.INFO) -else: - logging.basicConfig(level=logging.INFO, stream=sys.stdout) +configure_logging() + +validate_config() +initialize_database() app = App( process_before_response=not LOCAL_DEVELOPMENT, + token_verification_enabled=not LOCAL_DEVELOPMENT or HAS_REAL_BOT_TOKEN, oauth_flow=get_oauth_flow(), ) -def handler(event, context): +@app.middleware +def _capture_slack_retry_num(req, resp, next): + """Expose ``X-Slack-Retry-Num`` on context so message handlers can drop retries.""" + headers = getattr(req, "headers", None) or {} + vals = headers.get("x-slack-retry-num") + if vals: + try: + v = vals[0] if isinstance(vals, (list, tuple)) else vals + req.context["slack_retry_num"] = int(v) + except (ValueError, TypeError, IndexError): + pass + return next() + + +def handler(event: dict, context: dict) -> dict: + """AWS Lambda entry point. + + Receives an API Gateway proxy event. Federation API paths + (``/api/federation/*``) are handled directly; everything else + is delegated to the Slack Bolt request handler. 
+ """ + path = event.get("path", "") or event.get("rawPath", "") + if path.startswith("/api/federation"): + return _lambda_federation_handler(event) + slack_request_handler = SlackRequestHandler(app=app) return slack_request_handler.handle(event, context) -def main_response(body, logger, client, ack, context): - ack() - logger.info(json.dumps(body, indent=4)) +def _lambda_federation_handler(event: dict) -> dict: + """Handle a federation API request inside Lambda.""" + method = event.get("httpMethod", "GET") + path = event.get("path", "") + body_str = event.get("body", "") or "" + raw_headers = event.get("headers", {}) or {} + headers = {k: v for k, v in raw_headers.items()} + + status, resp = dispatch_federation_request(method, path, body_str, headers) + return { + "statusCode": status, + "headers": {"Content-Type": "application/json"}, + "body": json.dumps(resp), + } + + +_logger = logging.getLogger(__name__) + + +def view_ack(body: dict, logger, client, ack, context: dict) -> None: + """Production ack handler for ``view_submission``: fast response to Slack (3s budget). + + Deferred-ack views use :data:`~routing.VIEW_ACK_MAPPER`; all others get an empty ``ack()``. + """ + set_correlation_id() + request_type, request_id = get_request_type(body) + _logger.info( + "request_received", + extra={ + "request_type": request_type, + "request_id": request_id, + "team_id": safe_get(body, "team_id"), + "phase": "view_ack", + }, + ) + _logger.debug("request_body", extra={"body": json.dumps(_redact_sensitive(body))}) + + ack_handler = VIEW_ACK_MAPPER.get(request_id) + if ack_handler: + result = ack_handler(body, client, context) + if isinstance(result, dict): + ack(**result) + else: + ack() + else: + ack() + + +def main_response(body: dict, logger, client, ack, context: dict) -> None: + """Central dispatcher for every Slack request (lazy work phase in production). 
+ + In production, ``view_submission`` HTTP ack is sent by :func:`view_ack` first; + this function runs afterward and must not call ``ack()`` again for views. + + In local development, view ack + work run in one invocation: deferred views + call the ack handler from :data:`~routing.VIEW_ACK_MAPPER`, then the work handler. + + A unique correlation ID is assigned to every incoming request and + attached to all log entries emitted while processing it. + """ + set_correlation_id() request_type, request_id = get_request_type(body) - run_function = safe_get(safe_get(MAIN_MAPPER, request_type), request_id) + + if request_type == "view_submission": + if LOCAL_DEVELOPMENT: + ack_handler = VIEW_ACK_MAPPER.get(request_id) + if ack_handler: + result = ack_handler(body, client, context) + if isinstance(result, dict): + ack(**result) + else: + ack() + else: + ack() + # Production: ack already sent by view_ack + else: + ack() + + _logger.info( + "request_received", + extra={ + "request_type": request_type, + "request_id": request_id, + "team_id": safe_get(body, "team_id"), + }, + ) + _logger.debug("request_body", extra={"body": json.dumps(_redact_sensitive(body))}) + + run_function = MAIN_MAPPER.get(request_type, {}).get(request_id) if run_function: - run_function(body, client, logger, context) + try: + run_function(body, client, logger, context) + emit_metric( + "request_handled", + duration_ms=round(get_request_duration_ms(), 1), + request_type=request_type, + request_id=request_id, + ) + except Exception: + emit_metric( + "request_error", + request_type=request_type, + request_id=request_id, + ) + raise else: - logger.error( - f"no handler for path: {safe_get(safe_get(MAIN_MAPPER, request_type), request_id) or request_type+', '+request_id}" - ) + if not ( + request_type == "view_submission" + and request_id in VIEW_ACK_MAPPER + and request_id not in VIEW_MAPPER + ): + _logger.error( + "no_handler", + extra={ + "request_type": request_type, + "request_id": request_id, + }, + ) 
if LOCAL_DEVELOPMENT: @@ -52,9 +251,163 @@ def main_response(body, logger, client, ack, context): MATCH_ALL_PATTERN = re.compile(".*") app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) -app.view(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) -app.command(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS) +if LOCAL_DEVELOPMENT: + app.view(MATCH_ALL_PATTERN)(main_response) +else: + app.view(MATCH_ALL_PATTERN)(ack=view_ack, lazy=[main_response]) + + +def _http_listen_port() -> int: + """Port for Bolt container mode (Cloud Run sets ``PORT``; local default 3000).""" + raw = os.environ.get("PORT", "3000").strip() + try: + return int(raw) + except ValueError: + return 3000 + + +def run_syncbot_http_server( + *, + port: int | None = None, + bolt_path: str = "/slack/events", + http_server_logger_enabled: bool = True, +) -> None: + """Start the HTTP server used by Cloud Run and ``python app.py``. + + Serves Slack (``bolt_path``), OAuth install/callback, ``/health``, and + ``/api/federation/*`` when :data:`~constants.FEDERATION_ENABLED` is true. + Mirrors :class:`slack_bolt.app.app.SlackAppDevelopmentServer` routing with + extra paths for production parity with API Gateway + Lambda. 
+ """ + listen_port = port if port is not None else _http_listen_port() + _bolt_app = app + _bolt_oauth_flow = app.oauth_flow + _bolt_endpoint_path = bolt_path + _fed_enabled = FEDERATION_ENABLED + _http_log = http_server_logger_enabled + _fed_max_body = 1_048_576 # 1 MB + + class SyncBotHTTPHandler(BaseHTTPRequestHandler): + def log_message(self, fmt: str, *args) -> None: + if _http_log: + super().log_message(fmt, *args) + + def _path_no_query(self) -> str: + return self.path.partition("?")[0] + + def _send_raw( + self, + status: int, + headers: dict[str, list[str]], + body: str | bytes = "", + ) -> None: + if isinstance(body, str): + body_bytes = body.encode("utf-8") + else: + body_bytes = body + self.send_response(status) + for k, vs in headers.items(): + for v in vs: + self.send_header(k, v) + self.send_header("Content-Length", str(len(body_bytes))) + self.end_headers() + self.wfile.write(body_bytes) + + def _send_bolt_response(self, bolt_resp: BoltResponse) -> None: + self._send_raw( + status=bolt_resp.status, + headers={k: list(vs) for k, vs in bolt_resp.headers.items()}, + body=bolt_resp.body, + ) + + def do_GET(self) -> None: + path = self._path_no_query() + if path == "/health": + self._send_raw( + 200, + {"Content-Type": ["application/json"]}, + json.dumps({"status": "ok"}), + ) + return + if _fed_enabled and path.startswith("/api/federation"): + self._handle_federation("GET") + return + if _bolt_oauth_flow: + query = self.path.partition("?")[2] + if path == _bolt_oauth_flow.install_path: + bolt_req = BoltRequest( + body="", + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_oauth_flow.handle_installation(bolt_req) + self._send_bolt_response(bolt_resp) + return + if path == _bolt_oauth_flow.redirect_uri_path: + bolt_req = BoltRequest( + body="", + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_oauth_flow.handle_callback(bolt_req) + self._send_bolt_response(bolt_resp) + return + self._send_raw(404, {}) + + def do_POST(self) -> 
None: + path = self._path_no_query() + if _fed_enabled and path.startswith("/api/federation"): + self._handle_federation("POST") + return + if path != _bolt_endpoint_path: + self._send_raw(404, {}) + return + try: + content_len = int(self.headers.get("Content-Length") or 0) + except (TypeError, ValueError): + content_len = 0 + query = self.path.partition("?")[2] + request_body = self.rfile.read(content_len).decode("utf-8") + bolt_req = BoltRequest( + body=request_body, + query=query, + headers=self.headers, + ) + bolt_resp = _bolt_app.dispatch(bolt_req) + self._send_bolt_response(bolt_resp) + + def _handle_federation(self, method: str) -> None: + try: + content_len = min( + int(self.headers.get("Content-Length", 0)), + _fed_max_body, + ) + except (TypeError, ValueError): + content_len = 0 + body_str = self.rfile.read(content_len).decode() if content_len else "" + headers = {k: v for k, v in self.headers.items()} + status, resp = dispatch_federation_request( + method, self._path_no_query(), body_str, headers + ) + self._send_raw( + status, + {"Content-Type": ["application/json"]}, + json.dumps(resp), + ) + + server = HTTPServer(("0.0.0.0", listen_port), SyncBotHTTPHandler) + if _bolt_app.logger.level > logging.INFO: + print(get_boot_message(development_server=True)) + else: + _bolt_app.logger.info( + "http_server_started", + extra={"port": listen_port, "bolt_path": bolt_path}, + ) + try: + server.serve_forever(0.05) + finally: + server.server_close() if __name__ == "__main__": - app.start(3000) + run_syncbot_http_server(http_server_logger_enabled=LOCAL_DEVELOPMENT) diff --git a/syncbot/builders/__init__.py b/syncbot/builders/__init__.py new file mode 100644 index 0000000..59acf13 --- /dev/null +++ b/syncbot/builders/__init__.py @@ -0,0 +1,46 @@ +"""Builders package – Slack modal and home-tab UI constructors. + +Re-exports every public symbol so that ``import builders`` / +``from builders import X`` continues to work after the split. 
+""" + +from builders._common import ( + _format_channel_ref, + _get_group_members, + _get_groups_for_workspace, + _get_workspace_info, +) +from builders.channel_sync import ( + _build_inline_channel_sync, +) +from builders.home import ( + _home_tab_content_hash, + build_home_tab, + refresh_home_tab_for_workspace, +) +from builders.sync import build_join_sync_form, build_new_sync_form +from builders.user_mapping import ( + _USER_MAPPING_REFRESH_BUTTON_INDEX, + _user_mapping_content_hash, + build_user_mapping_edit_modal, + build_user_mapping_screen, + build_user_matching_entry, +) + +__all__ = [ + "_build_inline_channel_sync", + "_format_channel_ref", + "_get_group_members", + "_get_groups_for_workspace", + "_get_workspace_info", + "_home_tab_content_hash", + "build_home_tab", + "build_join_sync_form", + "build_new_sync_form", + "_USER_MAPPING_REFRESH_BUTTON_INDEX", + "_user_mapping_content_hash", + "build_user_mapping_edit_modal", + "build_user_mapping_screen", + "build_user_matching_entry", + "refresh_home_tab_for_workspace", +] diff --git a/syncbot/builders/_common.py b/syncbot/builders/_common.py new file mode 100644 index 0000000..5097125 --- /dev/null +++ b/syncbot/builders/_common.py @@ -0,0 +1,164 @@ +"""Shared helpers for builder modules.""" + +import logging + +from slack_sdk.web import WebClient + +import helpers +from db import DbManager +from db.schemas import Workspace, WorkspaceGroup, WorkspaceGroupMember +from helpers import get_user_id_from_body, is_user_authorized, safe_get + +_logger = logging.getLogger(__name__) + + +def _get_user_id(body: dict) -> str | None: + """Extract the acting user ID from any Slack request body.""" + return safe_get(body, "event", "user") or safe_get(body, "user", "id") or safe_get(body, "user_id") + + +def _get_team_id(body: dict) -> str | None: + """Extract the team ID from any Slack request body.""" + return ( + safe_get(body, "view", "team_id") + or safe_get(body, "team_id") + or safe_get(body, "team", "id") + or 
safe_get(body, "event", "view", "team_id") + ) + + +def _deny_unauthorized(body: dict, client: WebClient, logger) -> bool: + """Check authorization and send an ephemeral denial if the user is not an admin. + + Returns *True* if the user was denied (caller should return early). + """ + user_id = get_user_id_from_body(body) + if not user_id: + logger.warning("authorization_denied: could not determine user_id from request body") + return True + + if is_user_authorized(client, user_id): + return False + + channel_id = safe_get(body, "channel_id") or safe_get(body, "channel", "id") + _logger.warning( + "authorization_denied", + extra={"user_id": user_id, "action": "config"}, + ) + + if channel_id: + try: + client.chat_postEphemeral( + channel=channel_id, + user=user_id, + text=":lock: Only workspace admins and owners can configure SyncBot.", + ) + except Exception: + _logger.debug("Could not send ephemeral denial — user may have invoked from a modal") + + return True + + +def _get_groups_for_workspace(workspace_id: int) -> list[tuple[WorkspaceGroup, WorkspaceGroupMember]]: + """Return all active groups the workspace belongs to, with membership info.""" + members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + results: list[tuple[WorkspaceGroup, WorkspaceGroupMember]] = [] + for member in members: + groups = DbManager.find_records( + WorkspaceGroup, + [WorkspaceGroup.id == member.group_id, WorkspaceGroup.status == "active"], + ) + if groups: + results.append((groups[0], member)) + return results + + +def _get_group_members(group_id: int) -> list[WorkspaceGroupMember]: + """Return all active members of a group.""" + return DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.group_id == group_id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + 
+def _get_workspace_info(workspace: Workspace) -> dict: + """Fetch workspace icon URL and domain from the Slack API (cached 24h).""" + result: dict[str, str | None] = {"icon_url": None, "domain": None, "raw_domain": None} + if not workspace or not workspace.bot_token: + return result + + cache_key = f"ws_info:{workspace.id}" + cached = helpers._cache_get(cache_key) + if cached: + return cached + + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + info = ws_client.team_info() + result["icon_url"] = helpers.safe_get(info, "team", "icon", "image_88") or helpers.safe_get( + info, "team", "icon", "image_68" + ) + domain = helpers.safe_get(info, "team", "domain") + if domain: + result["domain"] = f"" + result["raw_domain"] = domain + helpers._cache_set(cache_key, result, ttl=86400) + except Exception as exc: + _logger.debug(f"_get_workspace_meta: team_info call failed: {exc}") + return result + + +def _format_channel_ref( + channel_id: str, + workspace: Workspace, + is_local: bool = True, + *, + include_workspace_in_link: bool = True, +) -> str: + """Format a channel reference for display in Block Kit mrkdwn.""" + if is_local: + return f"<#{channel_id}>" + + ws_name = workspace.workspace_name if workspace and workspace.workspace_name else "Partner" + + if not workspace or not workspace.bot_token: + return f"#{channel_id} ({ws_name})" if include_workspace_in_link else f"#{channel_id}" + + cache_key = f"chan_ref:{channel_id}:{include_workspace_in_link}" + cached = helpers._cache_get(cache_key) + if cached: + return cached + + ch_name = channel_id + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + info = ws_client.conversations_info(channel=channel_id) + ch_name = helpers.safe_get(info, "channel", "name") or channel_id + except Exception as e: + _logger.warning( + "format_channel_ref_failed", + extra={"channel_id": channel_id, "workspace": ws_name, "error": str(e)}, + ) + + ws_info = 
_get_workspace_info(workspace) + domain = ws_info.get("raw_domain") + link_text = f"#{ch_name} ({ws_name})" if include_workspace_in_link else f"#{ch_name}" + if domain: + deep_link = f"https://{domain}.slack.com/archives/{channel_id}" + result = f"<{deep_link}|{link_text}>" + else: + result = f"`[{link_text}]`" + if ch_name != channel_id: + helpers._cache_set(cache_key, result, ttl=3600) + return result diff --git a/syncbot/builders/channel_sync.py b/syncbot/builders/channel_sync.py new file mode 100644 index 0000000..2a6db4b --- /dev/null +++ b/syncbot/builders/channel_sync.py @@ -0,0 +1,186 @@ +"""Channel sync form builders.""" + +import logging + +import helpers +from builders._common import ( + _format_channel_ref, +) +from db import DbManager +from db.schemas import PostMeta, Sync, SyncChannel, Workspace, WorkspaceGroup, WorkspaceGroupMember +from slack import actions, orm +from slack.blocks import ( + context as block_context, +) +from slack.blocks import ( + section, +) + +_logger = logging.getLogger(__name__) + + +def _build_inline_channel_sync( + blocks: list, + group: WorkspaceGroup, + workspace_record: Workspace, + other_members: list[WorkspaceGroupMember], + context: dict | None = None, +) -> None: + """Append channel-sync blocks inline under a group on the Home tab. 
+ + Shows: + - Active synced channels with Pause/Stop buttons + - Paused synced channels with Resume/Stop buttons + - Channels waiting for a subscriber with Stop Syncing button + - Available channels from other members with Start Syncing button + """ + syncs_for_group = DbManager.find_records( + Sync, + [Sync.group_id == group.id], + ) + + published_syncs: list[tuple[Sync, SyncChannel, list[SyncChannel], bool]] = [] + waiting_syncs: list[tuple[Sync, SyncChannel]] = [] + available_syncs: list[tuple[Sync, list[SyncChannel]]] = [] + + for sync in syncs_for_group: + channels = DbManager.find_records( + SyncChannel, + [SyncChannel.sync_id == sync.id, SyncChannel.deleted_at.is_(None)], + ) + my_channel = next((c for c in channels if c.workspace_id == workspace_record.id), None) + other_channels = [c for c in channels if c.workspace_id != workspace_record.id] + + if my_channel and other_channels: + is_paused = my_channel.status == "paused" + published_syncs.append((sync, my_channel, other_channels, is_paused)) + elif my_channel and not other_channels: + waiting_syncs.append((sync, my_channel)) + elif not my_channel and other_channels: + if sync.sync_mode == "direct" and sync.target_workspace_id != workspace_record.id: + continue + available_syncs.append((sync, other_channels)) + + published_syncs.sort(key=lambda t: (t[0].title or "").lower()) + waiting_syncs.sort(key=lambda t: (t[0].title or "").lower()) + available_syncs.sort(key=lambda t: (t[0].title or "").lower()) + + if not published_syncs and not waiting_syncs and not available_syncs: + return + + blocks.append(section("*Synced Channels*")) + + for sync, my_ch, other_chs, is_paused in published_syncs: + my_ref = _format_channel_ref(my_ch.channel_id, workspace_record, is_local=True) + + # Workspace names for bracket: local first, then others; append (Paused) per workspace that paused + local_name = helpers.resolve_workspace_name(workspace_record) or f"Workspace {workspace_record.id}" + if my_ch.status == "paused": + 
local_name = f"{local_name} (Paused)" + other_names: list[str] = [] + for other_channel in other_chs: + other_ws = helpers.get_workspace_by_id(other_channel.workspace_id, context=context) + name = helpers.resolve_workspace_name(other_ws) if other_ws else f"Workspace {other_channel.workspace_id}" + if other_channel.status == "paused": + name = f"{name} (Paused)" + other_names.append(name) + all_ws_names = [local_name] + other_names + + if is_paused: + icon = ":double_vertical_bar:" + toggle_btn = orm.ButtonElement( + label="Resume Syncing", + action=f"{actions.CONFIG_RESUME_SYNC}_{sync.id}", + value=str(sync.id), + ) + else: + icon = ":arrows_counterclockwise:" + toggle_btn = orm.ButtonElement( + label="Pause Syncing", + action=f"{actions.CONFIG_PAUSE_SYNC}_{sync.id}", + value=str(sync.id), + ) + + blocks.append(section(f"{icon} {my_ref}")) + + context_parts: list[str] = [] + if is_paused: + status_tag = "Paused" + else: + status_tag = "Active" + + context_parts.append(f"Status: `{status_tag}`") + + if sync.sync_mode == "direct": + mode_tag = "1-to-1" + else: + mode_tag = "Available to Any" + + context_parts.append(f"Type: `{mode_tag}`") + + if all_ws_names: + context_parts.append(f"Members: `{', '.join(all_ws_names)}`") + + if getattr(my_ch, "created_at", None): + context_parts.append(f"Synced Since: `{my_ch.created_at:%B %d, %Y}`") + + msg_count = DbManager.count_records( + PostMeta, + [PostMeta.sync_channel_id == my_ch.id], + ) + context_parts.append(f"Messages Tracked: `{msg_count}`") + + if context_parts: + blocks.append(block_context("\n".join(context_parts))) + blocks.append( + orm.ActionsBlock( + elements=[ + toggle_btn, + orm.ButtonElement( + label="Stop Syncing", + action=f"{actions.CONFIG_STOP_SYNC}_{sync.id}", + value=str(sync.id), + style="danger", + ), + ] + ) + ) + + for sync, my_ch in waiting_syncs: + blocks.append(section(f":outbox_tray: <#{my_ch.channel_id}> — _waiting for subscribers_")) + is_publisher = sync.publisher_workspace_id == 
workspace_record.id + if is_publisher: + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Stop Syncing", + action=f"{actions.CONFIG_UNPUBLISH_CHANNEL}_{my_ch.id}", + value=str(sync.id), + style="danger", + ), + ] + ) + ) + + for sync, other_chs in available_syncs: + publisher_ws = helpers.get_workspace_by_id(other_chs[0].workspace_id, context=context) if other_chs else None + publisher_name = helpers.resolve_workspace_name(publisher_ws) if publisher_ws else " another Workspace" + if sync.sync_mode == "direct": + mode_tag = "1-to-1" + else: + mode_tag = "Available to Any" + + blocks.append(section(":inbox_tray: New Sync Available")) + blocks.append(block_context(f"Type: `{mode_tag}`\nPublisher: `{publisher_name}`\nChannel Name: `{sync.title}`")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Start Syncing", + action=f"{actions.CONFIG_SUBSCRIBE_CHANNEL}_{sync.id}", + value=str(sync.id), + ), + ] + ) + ) diff --git a/syncbot/builders/home.py b/syncbot/builders/home.py new file mode 100644 index 0000000..db9cb80 --- /dev/null +++ b/syncbot/builders/home.py @@ -0,0 +1,563 @@ +"""Home tab builder.""" + +import hashlib +import logging +from logging import Logger + +from slack_sdk.web import WebClient + +import constants +import helpers +from builders._common import ( + _get_group_members, + _get_groups_for_workspace, + _get_team_id, + _get_user_id, + _get_workspace_info, +) +from builders.channel_sync import _build_inline_channel_sync +from db import DbManager +from db.schemas import ( + FederatedWorkspace, + Sync, + SyncChannel, + UserMapping, + Workspace, + WorkspaceGroup, + WorkspaceGroupMember, +) +from slack import actions, orm +from slack.blocks import context as block_context +from slack.blocks import divider, header, section + +_logger = logging.getLogger(__name__) + + +def _home_tab_content_hash(workspace_record: Workspace) -> str: + """Compute a stable hash of the data that drives the Home tab. 
+ + Includes groups, members, syncs, sync channels (id/workspace/status), mapped counts, + pending invite ids, and reset-button visibility so the hash changes when anything + visible on Home changes (including PRIMARY_WORKSPACE / ENABLE_DB_RESET for Reset). + """ + workspace_id = workspace_record.id + workspace_name = (workspace_record.workspace_name or "") or "" + reset_visible = helpers.is_db_reset_visible_for_workspace(workspace_record.team_id) + my_groups = _get_groups_for_workspace(workspace_id) + group_ids = sorted(g.id for g, _ in my_groups) + pending_invites = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_id, + WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + pending_ids = tuple(sorted(inv.id for inv in pending_invites)) + group_payload: list[tuple] = [] + for group, _ in my_groups: + members = _get_group_members(group.id) + syncs = DbManager.find_records(Sync, [Sync.group_id == group.id]) + sync_ids = [s.id for s in syncs] + # Sync channels drive the "Synced Channels" section + sync_channel_tuples: list[tuple] = [] + for sync in syncs: + channels = DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id == sync.id, + SyncChannel.deleted_at.is_(None), + ], + ) + channel_sig = tuple( + (sync_channel.workspace_id, sync_channel.channel_id, sync_channel.status or "active") + for sync_channel in sorted(channels, key=lambda c: (c.workspace_id, c.channel_id)) + ) + sync_channel_tuples.append((sync.id, channel_sig)) + sync_channel_tuples.sort(key=lambda x: x[0]) + # Per-member channel_count and mapped_count (shown in group section) + member_sigs: list[tuple] = [] + for member in members: + ws_id = member.workspace_id or 0 + ch_count = 0 + if ws_id and sync_ids: + ch_count = len( + DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id.in_(sync_ids), + SyncChannel.workspace_id == ws_id, + SyncChannel.deleted_at.is_(None), + ], + ) + ) + 
mapped_count = 0 + if ws_id: + mapped_count = len( + DbManager.find_records( + UserMapping, + [ + UserMapping.group_id == group.id, + UserMapping.target_workspace_id == ws_id, + UserMapping.match_method != "none", + ], + ) + ) + member_sigs.append((ws_id, ch_count, mapped_count)) + member_sigs.sort(key=lambda x: x[0]) + group_payload.append((group.id, len(members), len(syncs), tuple(sync_channel_tuples), tuple(member_sigs))) + group_payload.sort(key=lambda x: x[0]) + payload = ( + workspace_id, + workspace_name, + tuple(group_ids), + tuple(group_payload), + pending_ids, + reset_visible, + ) + return hashlib.sha256(repr(payload).encode()).hexdigest() + + +def refresh_home_tab_for_workspace(workspace: Workspace, logger: Logger, context: dict | None = None) -> None: + """Publish an updated Home tab for every admin in *workspace*.""" + if not workspace or not workspace.bot_token or workspace.deleted_at: + return + ctx = context if context is not None else {} + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + admin_ids = helpers.get_admin_ids(ws_client, team_id=workspace.team_id, context=ctx) + except Exception as e: + _logger.warning(f"refresh_home_tab_for_workspace: failed to get admins: {e}") + return + + synthetic_body = {"team": {"id": workspace.team_id}} + for uid in admin_ids: + try: + build_home_tab(synthetic_body, ws_client, logger, ctx, user_id=uid) + except Exception as e: + _logger.warning( + "refresh_home_tab_for_workspace: failed for user %s in workspace %s: %s", + uid, + getattr(workspace, "team_id", workspace.id if workspace else None), + e, + ) + + +def build_home_tab( + body: dict, + client: WebClient, + logger: Logger, + context: dict, + *, + user_id: str | None = None, + return_blocks: bool = False, +) -> list[dict] | None: + """Build and publish the App Home tab. 
If return_blocks is True, return block dicts and do not publish.""" + team_id = _get_team_id(body) + user_id = user_id or _get_user_id(body) + if not team_id or not user_id: + _logger.warning("build_home_tab: missing team_id or user_id") + return None + + workspace_record: Workspace = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return None + + is_admin = helpers.is_user_authorized(client, user_id) + + blocks: list[orm.BaseBlock] = [] + + if not is_admin: + blocks.append(block_context(":lock: Only Workspace Admins can configure SyncBot.")) + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + return None + + # Compute hash for admin view so we can update cache after publish (manual or automatic) + current_hash = _home_tab_content_hash(workspace_record) + + # ── Workspace Groups ────────────────────────────────────── + blocks.append(header("Workspace Groups")) + blocks.append(block_context("_Groups of Workspaces that can Sync Channels._")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Create Group", + action=actions.CONFIG_CREATE_GROUP, + ), + orm.ButtonElement( + label="Join Group", + action=actions.CONFIG_JOIN_GROUP, + ), + ] + ) + ) + + my_groups = _get_groups_for_workspace(workspace_record.id) + + pending_invites = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.workspace_id == workspace_record.id, + WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + if not my_groups and not pending_invites: + blocks.append( + block_context( + "You are not in any Workspace Groups yet. Create or join a Group before you can Sync Channels with other Workspaces." 
+ ) + ) + else: + for group, my_membership in my_groups: + _build_group_section(blocks, group, my_membership, workspace_record, context) + + for invite in pending_invites: + _build_pending_invite_section(blocks, invite, context) + + # ── External Connections (federation) ───────────────────── + if constants.FEDERATION_ENABLED: + _build_federation_section(blocks, workspace_record) + + # ── SyncBot Configuration ──────────────────── + blocks.append(block_context("\u200b")) + blocks.append(divider()) + blocks.append(header("SyncBot Configuration")) + config_buttons = [ + orm.ButtonElement( + label="Refresh", + action=actions.CONFIG_REFRESH_HOME, + ), + ] + if helpers.is_backup_visible_for_workspace(workspace_record.team_id): + config_buttons.append( + orm.ButtonElement( + label="Backup/Restore", + action=actions.CONFIG_BACKUP_RESTORE, + ), + ) + if helpers.is_db_reset_visible_for_workspace(workspace_record.team_id): + config_buttons.append( + orm.ButtonElement( + label=":bomb: Reset Database", + action=actions.CONFIG_DB_RESET, + style="danger", + ), + ) + blocks.append(orm.ActionsBlock(elements=config_buttons)) + + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + # Update cache so next manual Refresh skips full rebuild when data unchanged + helpers.refresh_after_full( + f"home_tab_hash:{team_id}", + f"home_tab_blocks:{team_id}:{user_id}", + f"refresh_at:home:{team_id}:{user_id}", + current_hash, + block_dicts, + ) + return None + + +def _build_pending_invite_section( + blocks: list, + invite: WorkspaceGroupMember, + context: dict | None = None, +) -> None: + """Append blocks for an incoming group invite the workspace hasn't responded to yet.""" + group = DbManager.get_record(WorkspaceGroup, id=invite.group_id) + if not group: + return + + inviting_members = DbManager.find_records( + WorkspaceGroupMember, + [ + 
WorkspaceGroupMember.group_id == group.id, + WorkspaceGroupMember.status == "active", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + inviter_workspace_names = [] + for member in inviting_members: + if member.workspace_id: + ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + inviter_workspace_names.append( + helpers.resolve_workspace_name(ws) if ws else f"Workspace {member.workspace_id}" + ) + workspace_label = ", ".join(inviter_workspace_names) if inviter_workspace_names else "Another Workspace" + + inviter_label = workspace_label + if getattr(invite, "invited_by_slack_user_id", None) and getattr(invite, "invited_by_workspace_id", None): + inviter_ws = helpers.get_workspace_by_id(invite.invited_by_workspace_id, context=context) + if inviter_ws and inviter_ws.bot_token: + try: + ws_client = WebClient(token=helpers.decrypt_bot_token(inviter_ws.bot_token)) + admin_name, _ = helpers.get_user_info(ws_client, invite.invited_by_slack_user_id) + if admin_name: + inviter_label = f"{admin_name} from {workspace_label}" + except Exception as exc: + # Keep the workspace-level fallback label if we cannot resolve the + # inviter's display name from Slack. 
+ _logger.debug( + "pending_invite_inviter_name_lookup_failed", + extra={"invite_id": invite.id, "workspace_id": invite.invited_by_workspace_id, "error": str(exc)}, + ) + + blocks.append(divider()) + blocks.append(header(f"{group.name}")) + blocks.append(section(f":punch: *{inviter_label}* has invited your Workspace to join this Group.")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Accept", + action=f"{actions.CONFIG_ACCEPT_GROUP_REQUEST}_{invite.id}", + value=str(invite.id), + style="primary", + ), + orm.ButtonElement( + label="Decline", + action=f"{actions.CONFIG_DECLINE_GROUP_REQUEST}_{invite.id}", + value=str(invite.id), + style="danger", + ), + ] + ) + ) + + +def _build_group_section( + blocks: list, + group: WorkspaceGroup, + my_membership: WorkspaceGroupMember, + workspace_record: Workspace, + context: dict | None = None, +) -> None: + """Append blocks for a single workspace group.""" + blocks.append(divider()) + + all_members = _get_group_members(group.id) + other_members = [member for member in all_members if member.workspace_id != workspace_record.id] + + blocks.append(header(f"{group.name}")) + + # Action buttons for this group + group_actions: list[orm.ButtonElement] = [ + orm.ButtonElement( + label="Invite Workspace", + action=actions.CONFIG_INVITE_WORKSPACE, + value=str(group.id), + ), + orm.ButtonElement( + label="Sync Channel", + action=actions.CONFIG_PUBLISH_CHANNEL, + value=str(group.id), + ), + orm.ButtonElement( + label="User Mapping", + action=actions.CONFIG_MANAGE_USER_MATCHING, + value=str(group.id), + ), + ] + group_actions.append( + orm.ButtonElement( + label="Leave Group", + action=f"{actions.CONFIG_LEAVE_GROUP}_{group.id}", + style="danger", + value=str(group.id), + ), + ) + blocks.append(orm.ActionsBlock(elements=group_actions)) + + syncs_for_group = DbManager.find_records(Sync, [Sync.group_id == group.id]) + sync_ids = [s.id for s in syncs_for_group] + + for member in all_members: + if 
member.workspace_id: + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + name = helpers.resolve_workspace_name(member_ws) if member_ws else f"Workspace {member.workspace_id}" + if member.role == "creator": + name += " _(Group Creator)_" + elif member.federated_workspace_id: + fed_ws = DbManager.get_record(FederatedWorkspace, id=member.federated_workspace_id) + name = f":globe_with_meridians: {fed_ws.name}" if fed_ws and fed_ws.name else "External" + else: + name = "Unknown" + + joined_str = f"{member.joined_at:%B %d, %Y}" if member.joined_at else "Unknown" + + ws_id = member.workspace_id + channel_count = 0 + if ws_id and sync_ids: + channels = DbManager.find_records( + SyncChannel, + [ + SyncChannel.sync_id.in_(sync_ids), + SyncChannel.workspace_id == ws_id, + SyncChannel.deleted_at.is_(None), + ], + ) + channel_count = len(channels) + + mapped_count = 0 + if ws_id: + mapped = DbManager.find_records( + UserMapping, + [ + UserMapping.group_id == group.id, + UserMapping.target_workspace_id == ws_id, + UserMapping.match_method != "none", + ], + ) + mapped_count = len(mapped) + + stats = f"Member Since: `{joined_str}`\nSynced Channels: `{channel_count}`\nMapped Users: `{mapped_count}` " + text = f"*{name}*\n{stats}" + if member.workspace_id and member_ws: + ws_info = _get_workspace_info(member_ws) + icon_url = ws_info.get("icon_url") + if icon_url: + blocks.append( + orm.SectionBlock( + label=text, + element=orm.ImageAccessoryElement( + image_url=icon_url, + alt_text=name.split(" ")[0] if name else "Workspace", + ), + ) + ) + else: + blocks.append(block_context(text)) + else: + blocks.append(block_context(text)) + + pending_members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.group_id == group.id, + WorkspaceGroupMember.status == "pending", + WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for pending_member in pending_members: + pending_ws = None + if pending_member.workspace_id: + pending_ws = 
helpers.get_workspace_by_id(pending_member.workspace_id, context=context) + pname = ( + helpers.resolve_workspace_name(pending_ws) if pending_ws else f"Workspace {pending_member.workspace_id}" + ) + else: + pname = "Unknown" + stats_pending = "Member Since: `Pending Invite`" + text_pending = f"*{pname}*\n{stats_pending}" + if pending_member.workspace_id and pending_ws: + ws_info = _get_workspace_info(pending_ws) + icon_url = ws_info.get("icon_url") + if icon_url: + blocks.append( + orm.SectionBlock( + label=text_pending, + element=orm.ImageAccessoryElement( + image_url=icon_url, + alt_text=pname.split(" ")[0] if pname else "Workspace", + ), + ) + ) + else: + blocks.append(block_context(text_pending)) + else: + blocks.append(block_context(text_pending)) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Cancel Invite", + action=f"{actions.CONFIG_CANCEL_GROUP_REQUEST}_{pending_member.id}", + value=str(pending_member.id), + style="danger", + ), + ] + ) + ) + + _build_inline_channel_sync(blocks, group, workspace_record, other_members, context) + + +def _build_federation_section( + blocks: list, + workspace_record: Workspace, +) -> None: + """Append the federation section to the home tab.""" + blocks.append(divider()) + blocks.append(block_context("\u200b")) + blocks.append(section("*External Connections*")) + blocks.append(block_context("Connect with Workspaces on other SyncBot deployments.")) + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":globe_with_meridians: Generate Connection Code", + action=actions.CONFIG_GENERATE_FEDERATION_CODE, + ), + orm.ButtonElement( + label=":link: Enter Connection Code", + action=actions.CONFIG_ENTER_FEDERATION_CODE, + ), + orm.ButtonElement( + label=":package: Data Migration", + action=actions.CONFIG_DATA_MIGRATION, + ), + ] + ) + ) + + fed_members = DbManager.find_records( + WorkspaceGroupMember, + [ + WorkspaceGroupMember.federated_workspace_id.isnot(None), + 
WorkspaceGroupMember.deleted_at.is_(None), + WorkspaceGroupMember.status == "active", + ], + ) + + shown_fed: set[int] = set() + for fed_member in fed_members: + if not fed_member.federated_workspace_id or fed_member.federated_workspace_id in shown_fed: + continue + my_groups = _get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + if fed_member.group_id not in my_group_ids: + continue + + shown_fed.add(fed_member.federated_workspace_id) + fed_ws = DbManager.get_record(FederatedWorkspace, id=fed_member.federated_workspace_id) + if not fed_ws: + continue + + fed_ws_name = fed_ws.name or f"Connection {fed_ws.instance_id[:8]}" + status_icon = ":white_check_mark:" if fed_ws.status == "active" else ":warning:" + + blocks.append(block_context("\u200b")) + label_text = f"{status_icon} *{fed_ws_name}*" + label_text += f"\n:globe_with_meridians: {fed_ws.webhook_url}" + blocks.append(section(label_text)) + + blocks.append( + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label="Remove Connection", + action=f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_{fed_member.id}", + style="danger", + value=str(fed_member.id), + ), + ] + ) + ) diff --git a/syncbot/builders/sync.py b/syncbot/builders/sync.py new file mode 100644 index 0000000..8cc7616 --- /dev/null +++ b/syncbot/builders/sync.py @@ -0,0 +1,95 @@ +"""Join/New sync form builders.""" + +import copy +import logging + +from slack_sdk.web import WebClient + +import helpers +from builders._common import _deny_unauthorized, _get_group_members, _get_groups_for_workspace +from db import DbManager +from db.schemas import Sync, SyncChannel, Workspace +from helpers import safe_get +from slack import actions, forms, orm + +_logger = logging.getLogger(__name__) + + +def build_join_sync_form( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Pushes a new modal layer to join an existing sync.""" + if _deny_unauthorized(body, client, logger): + return + + 
trigger_id: str = safe_get(body, "trigger_id") + team_id = safe_get(body, "view", "team_id") + join_sync_form: orm.BlockView = copy.deepcopy(forms.JOIN_SYNC_FORM) + + workspace_record: Workspace = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + my_groups = _get_groups_for_workspace(workspace_record.id) + group_ws_ids: set[int] = {workspace_record.id} + for group, _ in my_groups: + for m in _get_group_members(group.id): + if m.workspace_id: + group_ws_ids.add(m.workspace_id) + + channel_sync_workspace_records: list[tuple[SyncChannel, Workspace]] = DbManager.find_join_records2( + left_cls=SyncChannel, + right_cls=Workspace, + filters=[Workspace.team_id == team_id, SyncChannel.deleted_at.is_(None)], + ) + already_joined_sync_ids = {record[0].sync_id for record in channel_sync_workspace_records} + + all_syncs: list[Sync] = DbManager.find_records(Sync, [True]) + eligible_syncs: list[Sync] = [] + + for sync in all_syncs: + if sync.id in already_joined_sync_ids: + continue + sync_channels = DbManager.find_records( + SyncChannel, + [SyncChannel.sync_id == sync.id, SyncChannel.deleted_at.is_(None)], + ) + if any(sc.workspace_id in group_ws_ids for sc in sync_channels): + eligible_syncs.append(sync) + + options = orm.as_selector_options( + [sync.title for sync in eligible_syncs], + [str(sync.id) for sync in eligible_syncs], + ) + join_sync_form.set_options({actions.CONFIG_JOIN_SYNC_SELECT: options}) + join_sync_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, + title_text="Join Sync", + new_or_add="new", + ) + + +def build_new_sync_form( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Pushes a new modal layer to create a new sync.""" + if _deny_unauthorized(body, client, logger): + return + + trigger_id: str = safe_get(body, "trigger_id") + new_sync_form: orm.BlockView = copy.deepcopy(forms.NEW_SYNC_FORM) + new_sync_form.post_modal( + 
client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_NEW_SYNC_SUBMIT, + title_text="New Sync", + new_or_add="new", + ) diff --git a/syncbot/builders/user_mapping.py b/syncbot/builders/user_mapping.py new file mode 100644 index 0000000..f45ecfc --- /dev/null +++ b/syncbot/builders/user_mapping.py @@ -0,0 +1,365 @@ +"""User mapping screen builders.""" + +import contextlib # noqa: I001 +import hashlib +import logging + +from slack_sdk.web import WebClient + +import helpers +from builders._common import ( + _deny_unauthorized, + _get_group_members, + _get_groups_for_workspace, + _get_team_id, + _get_user_id, +) +from db import DbManager +from db.schemas import UserDirectory, UserMapping, Workspace, WorkspaceGroup +from slack import actions, orm +from slack.blocks import actions as blocks_actions, button, context as block_context, divider, header, section + +_logger = logging.getLogger(__name__) + +# Index of the Actions block that contains the Refresh button (after header at 0) +_USER_MAPPING_REFRESH_BUTTON_INDEX = 1 + + +def _user_mapping_content_hash(workspace_record: Workspace, group_id: int | None) -> str: + """Compute a stable hash of the data that drives the user mapping screen (minimal DB).""" + workspace_id = workspace_record.id + gid = group_id or 0 + if gid: + members = _get_group_members(gid) + linked_workspace_ids = {m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_id} + else: + my_groups = _get_groups_for_workspace(workspace_id) + linked_workspace_ids = set() + for g, _ in my_groups: + for m in _get_group_members(g.id): + if m.workspace_id and m.workspace_id != workspace_id: + linked_workspace_ids.add(m.workspace_id) + + all_mappings: list[UserMapping] = [] + for source_ws_id in linked_workspace_ids: + mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == source_ws_id, + UserMapping.target_workspace_id == workspace_id, + ], + ) + all_mappings.extend(mappings) + + payload 
= ( + workspace_id, + gid, + tuple((m.id, m.match_method, m.target_user_id) for m in sorted(all_mappings, key=lambda x: x.id)), + ) + return hashlib.sha256(repr(payload).encode()).hexdigest() + + +def build_user_matching_entry( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Entry point when user clicks "User Mapping" on the Home tab.""" + if _deny_unauthorized(body, client, logger): + return + + raw_value = helpers.safe_get(body, "actions", 0, "value") + group_id = None + if raw_value: + with contextlib.suppress(TypeError, ValueError): + group_id = int(raw_value) + + user_id = _get_user_id(body) + team_id = _get_team_id(body) + if not user_id or not team_id: + return + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + build_user_mapping_screen(client, workspace_record, user_id, group_id=group_id) + + +def build_user_mapping_screen( + client: WebClient, + workspace_record: Workspace, + user_id: str, + *, + group_id: int | None = None, + context: dict | None = None, + return_blocks: bool = False, +) -> list | None: + """Publish the user mapping screen on the Home tab. 
If return_blocks is True, return block dicts and do not publish.""" + group_name = "Group" + if group_id: + groups = DbManager.find_records(WorkspaceGroup, [WorkspaceGroup.id == group_id]) + if groups: + group_name = groups[0].name + + if group_id: + members = _get_group_members(group_id) + linked_workspace_ids = { + m.workspace_id for m in members if m.workspace_id and m.workspace_id != workspace_record.id + } + else: + my_groups = _get_groups_for_workspace(workspace_record.id) + linked_workspace_ids: set[int] = set() + for g, _ in my_groups: + for m in _get_group_members(g.id): + if m.workspace_id and m.workspace_id != workspace_record.id: + linked_workspace_ids.add(m.workspace_id) + + all_mappings: list[UserMapping] = [] + for source_ws_id in linked_workspace_ids: + mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == source_ws_id, + UserMapping.target_workspace_id == workspace_record.id, + ], + ) + all_mappings.extend(mappings) + + unmapped = [m for m in all_mappings if m.target_user_id is None or m.match_method == "none"] + soft_matched = [m for m in all_mappings if m.match_method in ("name", "manual") and m.target_user_id is not None] + email_matched = [m for m in all_mappings if m.match_method == "email" and m.target_user_id is not None] + + _ws_name_lookup: dict[int, str] = {} + for source_ws_id in linked_workspace_ids: + ws = helpers.get_workspace_by_id(source_ws_id, context=context) + if ws: + _ws_name_lookup[source_ws_id] = helpers.resolve_workspace_name(ws) or "" + + def _display_for_mapping(m: UserMapping, ws_lookup: dict[int, str]) -> str: + """Formatted display string: normalized name + workspace in parens if present.""" + display = helpers.normalize_display_name(m.source_display_name or m.source_user_id) + ws_label = ws_lookup.get(m.source_workspace_id, "") + return f"{display} ({ws_label})" if ws_label else display + + unmapped.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + 
soft_matched.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + email_matched.sort(key=lambda m: _display_for_mapping(m, _ws_name_lookup).lower()) + + _email_lookup: dict[tuple[int, str], str] = {} + _avatar_lookup: dict[tuple[int, str], str] = {} + for source_ws_id in linked_workspace_ids: + ws = helpers.get_workspace_by_id(source_ws_id, context=context) + member_client = None + if ws and ws.bot_token: + with contextlib.suppress(Exception): + member_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + dir_entries = DbManager.find_records( + UserDirectory, + [UserDirectory.workspace_id == source_ws_id, UserDirectory.deleted_at.is_(None)], + ) + for entry in dir_entries: + if entry.email: + _email_lookup[(source_ws_id, entry.slack_user_id)] = entry.email + if member_client: + with contextlib.suppress(Exception): + _, avatar_url = helpers.get_user_info(member_client, entry.slack_user_id) + if avatar_url: + _avatar_lookup[(source_ws_id, entry.slack_user_id)] = avatar_url + + def _user_context_block(mapping: UserMapping, label_text: str) -> orm.ContextBlock: + avatar_url = _avatar_lookup.get((mapping.source_workspace_id, mapping.source_user_id)) + elements: list = [] + if avatar_url: + elements.append( + orm.ImageContextElement( + image_url=avatar_url, + alt_text=mapping.source_display_name or "user", + ) + ) + elements.append(orm.ContextElement(initial_value=label_text)) + return orm.ContextBlock(elements=elements) + + group_val = str(group_id) if group_id else "0" + blocks: list[orm.BaseBlock] = [ + header(f"User Mapping for: {group_name}"), + block_context( + "_Users with the same email address between Workspaces will be mapped automatically. 
Other users can be mapped manually._" + ), + blocks_actions( + button(":arrow_left: Back", actions.CONFIG_USER_MAPPING_BACK, value=group_val), + button("Refresh", actions.CONFIG_USER_MAPPING_REFRESH, value=group_val), + ), + block_context(f"*Mapped: {len(soft_matched) + len(email_matched)}* \u00b7 *Unmapped: {len(unmapped)}*"), + divider(), + ] + + if unmapped: + blocks.append(section(":warning: *Unmapped Users*")) + blocks.append(block_context("\u200b")) + for m in unmapped: + blocks.append(_user_context_block(m, f"*{_display_for_mapping(m, _ws_name_lookup)}*")) + blocks.append(blocks_actions(button("Edit", f"{actions.CONFIG_USER_MAPPING_EDIT}_{m.id}", value=group_val))) + blocks.append(divider()) + + if soft_matched: + blocks.append(section("*Soft / Manual Matches*")) + blocks.append(block_context("\u200b")) + for m in soft_matched: + method_tag = "manual" if m.match_method == "manual" else "name" + blocks.append( + _user_context_block( + m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> _[{method_tag}]_" + ) + ) + blocks.append(blocks_actions(button("Edit", f"{actions.CONFIG_USER_MAPPING_EDIT}_{m.id}", value=group_val))) + blocks.append(divider()) + + if email_matched: + blocks.append(section("*Email Matches*")) + blocks.append(block_context("\u200b")) + for m in email_matched: + email_addr = _email_lookup.get((m.source_workspace_id, m.source_user_id), "") + email_tag = f"_{email_addr}_" if email_addr else "_[email]_" + blocks.append( + _user_context_block( + m, f"*{_display_for_mapping(m, _ws_name_lookup)}* \u2192 <@{m.target_user_id}> {email_tag}" + ) + ) + blocks.append(divider()) + + if not unmapped and not soft_matched and not email_matched: + blocks.append(block_context("_No users have been mapped in this Workspace Group yet._")) + + block_dicts = orm.BlockView(blocks=blocks).as_form_field() + if return_blocks: + return block_dicts + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + return 
None + + +def build_user_mapping_edit_modal( + body: dict, + client: WebClient, + logger, + context: dict, +) -> None: + """Open a modal to edit a single user mapping.""" + if _deny_unauthorized(body, client, logger): + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + action_id = helpers.safe_get(body, "actions", 0, "action_id") or "" + mapping_id_str = action_id.replace(actions.CONFIG_USER_MAPPING_EDIT + "_", "") + try: + mapping_id = int(mapping_id_str) + except (TypeError, ValueError): + _logger.warning(f"build_user_mapping_edit_modal: invalid mapping_id: {mapping_id_str}") + return + + raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" + try: + group_id = int(raw_group) + except (TypeError, ValueError): + group_id = 0 + + mapping = DbManager.get_record(UserMapping, id=mapping_id) + if not mapping: + _logger.warning(f"build_user_mapping_edit_modal: mapping {mapping_id} not found") + return + + team_id = _get_team_id(body) + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if not workspace_record: + return + + source_ws = helpers.get_workspace_by_id(mapping.source_workspace_id) + source_ws_name = helpers.resolve_workspace_name(source_ws) if source_ws else "Partner" + display = helpers.normalize_display_name(mapping.source_display_name or mapping.source_user_id) + + existing_mappings = DbManager.find_records( + UserMapping, + [ + UserMapping.source_workspace_id == mapping.source_workspace_id, + UserMapping.target_workspace_id == mapping.target_workspace_id, + UserMapping.target_user_id.isnot(None), + UserMapping.match_method != "none", + UserMapping.id != mapping.id, + ], + ) + taken_target_ids = {m.target_user_id for m in existing_mappings} + + directory = DbManager.find_records( + UserDirectory, + [UserDirectory.workspace_id == workspace_record.id, UserDirectory.deleted_at.is_(None)], + ) + directory.sort(key=lambda u: (u.display_name or u.real_name 
or u.slack_user_id).lower()) + + has_mapping = mapping.target_user_id is not None and mapping.match_method != "none" + options: list[orm.SelectorOption] = [] + if has_mapping: + options.append(orm.SelectorOption(name="\u274c Remove Mapping", value="__remove__")) + for entry in directory: + if entry.slack_user_id in taken_target_ids: + continue + label = entry.display_name or entry.real_name or entry.slack_user_id + if entry.email: + label = f"{label} ({entry.email})" + if len(label) > 75: + label = label[:72] + "..." + options.append(orm.SelectorOption(name=label, value=entry.slack_user_id)) + + initial_value = None + if mapping.target_user_id and mapping.match_method != "none": + initial_value = mapping.target_user_id + + avatar_accessory = None + if source_ws and source_ws.bot_token: + with contextlib.suppress(Exception): + member_client = WebClient(token=helpers.decrypt_bot_token(source_ws.bot_token)) + _, avatar_url = helpers.get_user_info(member_client, mapping.source_user_id) + if avatar_url: + avatar_accessory = orm.ImageAccessoryElement(image_url=avatar_url, alt_text=display) + + blocks: list[orm.BaseBlock] = [ + orm.SectionBlock(label=f"*{display}*\n_{source_ws_name}_", element=avatar_accessory), + ] + if mapping.target_user_id and mapping.match_method != "none": + blocks.append(block_context(f"Currently mapped to <@{mapping.target_user_id}> _[{mapping.match_method}]_")) + blocks.append(divider()) + if options: + blocks.append( + orm.InputBlock( + label="Map to user", + action=actions.CONFIG_USER_MAPPING_EDIT_SELECT, + element=orm.StaticSelectElement( + placeholder="Select a user...", + options=options, + initial_value=initial_value, + ), + optional=True, + ) + ) + else: + blocks.append( + block_context( + "_No available users to map to. 
All users in your workspace are already mapped to other users._" + ) + ) + + meta = {"mapping_id": mapping_id, "group_id": group_id or 0} + modal_form = orm.BlockView(blocks=blocks) + modal_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_USER_MAPPING_EDIT_SUBMIT, + title_text="Edit Mapping", + submit_button_text="Save", + close_button_text="Cancel", + parent_metadata=meta, + new_or_add="new", + ) diff --git a/syncbot/constants.py b/syncbot/constants.py new file mode 100644 index 0000000..5c670a0 --- /dev/null +++ b/syncbot/constants.py @@ -0,0 +1,227 @@ +"""Application constants and startup configuration validation. + +This module defines: +1) environment-variable *name* constants, and +2) derived runtime flags computed from ``os.environ``. + +It also provides :func:`validate_config` to fail fast on missing +configuration at startup. +""" + +import logging +import os + +_logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Environment-variable name constants +# +# Each value is the *name* of the env var, not its value. The actual values +# are read from os.environ at runtime. 
+# --------------------------------------------------------------------------- + +SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN" +SLACK_CLIENT_ID = "SLACK_CLIENT_ID" +SLACK_CLIENT_SECRET = "SLACK_CLIENT_SECRET" +SLACK_BOT_SCOPES = "SLACK_BOT_SCOPES" +SLACK_USER_SCOPES = "SLACK_USER_SCOPES" +SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET" +TOKEN_ENCRYPTION_KEY = "TOKEN_ENCRYPTION_KEY" +REQUIRE_ADMIN = "REQUIRE_ADMIN" + +# Database: backend-agnostic (postgresql, mysql, or sqlite) +DATABASE_BACKEND = "DATABASE_BACKEND" +DATABASE_URL = "DATABASE_URL" + +# Network SQL backends (used when DATABASE_URL is unset) +DATABASE_HOST = "DATABASE_HOST" +DATABASE_PORT = "DATABASE_PORT" +DATABASE_USER = "DATABASE_USER" +DATABASE_PASSWORD = "DATABASE_PASSWORD" +DATABASE_SCHEMA = "DATABASE_SCHEMA" +DATABASE_SSL_CA_PATH = "DATABASE_SSL_CA_PATH" +DATABASE_TLS_ENABLED = "DATABASE_TLS_ENABLED" + +# Slack Team ID of the primary workspace (backup/restore and DB reset when enabled). +PRIMARY_WORKSPACE = "PRIMARY_WORKSPACE" + +# When "true"/"1"/"yes" and PRIMARY_WORKSPACE matches, show Reset Database on Home. +ENABLE_DB_RESET = "ENABLE_DB_RESET" + +# --------------------------------------------------------------------------- +# Derived runtime flags / computed values +# --------------------------------------------------------------------------- + +LOCAL_DEVELOPMENT = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" + +_BOT_TOKEN_PLACEHOLDER = "xoxb-0-0" + + +def _has_real_bot_token() -> bool: + """Return *True* if SLACK_BOT_TOKEN looks like a genuine Slack token.""" + token = os.environ.get(SLACK_BOT_TOKEN, "").strip() + return token.startswith("xoxb-") and token != _BOT_TOKEN_PLACEHOLDER + + +HAS_REAL_BOT_TOKEN: bool = _has_real_bot_token() + +WARNING_BLOCK = "WARNING_BLOCK" + +# --------------------------------------------------------------------------- +# User-matching TTLs (seconds) +# +# How long a cached match result is considered "fresh" before re-checking. 
+# Manual matches never expire and can only be removed via the admin UI. +# --------------------------------------------------------------------------- + +MATCH_TTL_EMAIL = 30 * 24 * 3600 # 30 days for email-confirmed matches +MATCH_TTL_NAME = 14 * 24 * 3600 # 14 days for name-based matches +MATCH_TTL_NONE = 90 * 24 * 3600 # 90 days for no-match (team_join handles re-checks) +USER_DIR_REFRESH_TTL = 24 * 3600 # 24 hours per workspace directory refresh +USER_MATCHING_PAGE_SIZE = 40 # max unmatched users shown in the modal + +# Refresh button cooldown (seconds) when content hash unchanged +REFRESH_COOLDOWN_SECONDS = 60 + +SOFT_DELETE_RETENTION_DAYS = int(os.environ.get("SOFT_DELETE_RETENTION_DAYS", "30")) + +# --------------------------------------------------------------------------- +# Federation +# --------------------------------------------------------------------------- + +SYNCBOT_INSTANCE_ID = "SYNCBOT_INSTANCE_ID" +SYNCBOT_PUBLIC_URL = "SYNCBOT_PUBLIC_URL" +FEDERATION_ENABLED = os.environ.get("SYNCBOT_FEDERATION_ENABLED", "false").lower() == "true" + + +# --------------------------------------------------------------------------- +# Startup configuration validation +# +# Validates that all required environment variables are set before the app +# handles any requests. Fails fast in production; warns in local dev. +# --------------------------------------------------------------------------- + +def get_database_backend() -> str: + """Return ``postgresql``, ``mysql``, or ``sqlite``. + + Defaults to ``mysql`` when unset. + """ + return os.environ.get(DATABASE_BACKEND, "mysql").lower().strip() or "mysql" + + +def _env_bool(name: str, default: bool) -> bool: + """Parse common boolean env values with a safe default.""" + value = os.environ.get(name) + if value is None: + return default + return value.strip().lower() in {"1", "true", "yes", "on"} + + +def database_tls_enabled() -> bool: + """Return True when MySQL/PostgreSQL TLS should be used. 
+ + Defaults: + - local dev: disabled + - non-local: enabled + Can be overridden with DATABASE_TLS_ENABLED=true/false. + """ + default = not LOCAL_DEVELOPMENT + return _env_bool(DATABASE_TLS_ENABLED, default) + + +def database_ssl_ca_path() -> str: + """Return CA bundle path for DB TLS verification, or empty string for system defaults. + + If :envvar:`DATABASE_SSL_CA_PATH` is set, that path is returned as-is (caller may + verify it exists). Otherwise the first existing file among common OS locations + is used (Amazon Linux, Debian, Alpine). + """ + explicit = os.environ.get(DATABASE_SSL_CA_PATH, "").strip() + if explicit: + return explicit + for candidate in ( + "/etc/pki/tls/certs/ca-bundle.crt", # RHEL / Amazon Linux / Lambda + "/etc/ssl/certs/ca-certificates.crt", # Debian / Ubuntu / Cloud Run image + "/etc/ssl/cert.pem", # Alpine / macOS + ): + if os.path.isfile(candidate): + return candidate + return "" + + +def get_required_db_vars() -> list: + """Return list of required env var names for the current database backend.""" + backend = get_database_backend() + if backend == "sqlite": + return [DATABASE_URL] + # mysql / postgresql: require URL or host/user/password/schema + if os.environ.get(DATABASE_URL): + return [] # URL is enough + return [ + DATABASE_HOST, + DATABASE_USER, + DATABASE_PASSWORD, + DATABASE_SCHEMA, + ] + + +# Required in all environments (non-DB vars; DB vars are backend-dependent) +_REQUIRED_ALWAYS_NON_DB: list = [] + +# Required only in production (non-local deployments). +_REQUIRED_PRODUCTION = [ + SLACK_SIGNING_SECRET, + SLACK_CLIENT_ID, + SLACK_CLIENT_SECRET, + SLACK_BOT_SCOPES, + TOKEN_ENCRYPTION_KEY, +] + + +# Minimum length for TOKEN_ENCRYPTION_KEY in production (reject weak/placeholder values). +_TOKEN_ENCRYPTION_KEY_MIN_LEN = 16 +_TOKEN_ENCRYPTION_KEY_PLACEHOLDERS = frozenset({"123", "changeme", "secret", "password"}) + + +def _encryption_active() -> bool: + """Return True if bot-token encryption is configured with a strong key. 
+ + In non-local environments the key must be set, at least _TOKEN_ENCRYPTION_KEY_MIN_LEN + characters, and not a known placeholder. Local dev can use any value or leave unset. + """ + key = (os.environ.get(TOKEN_ENCRYPTION_KEY) or "").strip() + if not key or len(key) < _TOKEN_ENCRYPTION_KEY_MIN_LEN: + return False + return key.lower() not in _TOKEN_ENCRYPTION_KEY_PLACEHOLDERS + + +def validate_config() -> None: + """Check that required environment variables are present. + + In production this raises immediately so the Lambda fails on cold-start + rather than silently misbehaving. In local development it only warns. + DB requirements depend on DATABASE_BACKEND (postgresql, mysql, or sqlite). + """ + required = list(_REQUIRED_ALWAYS_NON_DB) + list(get_required_db_vars()) + if not LOCAL_DEVELOPMENT: + required.extend(_REQUIRED_PRODUCTION) + + missing = [var for var in required if not os.environ.get(var)] + + if missing: + msg = "Missing required environment variable(s): " + ", ".join(missing) + if LOCAL_DEVELOPMENT: + _logger.warning(msg + " (continuing in local-dev mode)") + else: + _logger.critical(msg) + raise OSError(msg) + + if not LOCAL_DEVELOPMENT and not _encryption_active(): + msg = ( + "TOKEN_ENCRYPTION_KEY is required in production and must be a secure, random value " + f"(at least {_TOKEN_ENCRYPTION_KEY_MIN_LEN} characters). " + "Use your provider's secret manager; the AWS template auto-generates it. " + "Back up the key after first deploy. In local dev you may set it manually or leave unset." + ) + _logger.critical(msg) + raise OSError(msg) diff --git a/syncbot/db/__init__.py b/syncbot/db/__init__.py new file mode 100644 index 0000000..63fb4df --- /dev/null +++ b/syncbot/db/__init__.py @@ -0,0 +1,568 @@ +"""Database engine, session management, and the :class:`DbManager` CRUD helper. 
+ +Key design decisions: + +* **Connection pooling** — Uses :class:`~sqlalchemy.pool.QueuePool` with + ``pool_pre_ping=True`` so that warm Lambda containers reuse connections + while stale ones are transparently replaced. +* **Automatic retry** — The :func:`_with_retry` decorator retries any + :class:`~sqlalchemy.exc.OperationalError` up to ``_MAX_RETRIES`` times, + disposing the engine between attempts to force a fresh connection. +""" + +import logging +import os +import ssl +import time +from dataclasses import dataclass +from pathlib import Path +from typing import TypeVar +from urllib.parse import quote_plus + +from sqlalchemy import and_, create_engine, func, pool, text +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import sessionmaker + +import constants +from db.schemas import BaseClass + +_logger = logging.getLogger(__name__) + + +@dataclass +class DatabaseField: + name: str + value: object = None + + +GLOBAL_ENGINE = None +GLOBAL_SESSION = None +GLOBAL_SCHEMA = None + +# Maximum number of times to retry a DB operation on a transient connection error +_MAX_RETRIES = 2 +_DB_INIT_MAX_ATTEMPTS = 15 +_DB_INIT_RETRY_SECONDS = 2 +# Migrations live next to this package so they are included in the Lambda bundle (SAM CodeUri: syncbot/). +_ALEMBIC_SCRIPT_LOCATION = Path(__file__).resolve().parent / "alembic" + +# Repo root locally; Lambda deployment root (/var/task) in AWS — used for relative SQLite paths. 
+_syncbot_dir = Path(__file__).resolve().parent.parent +_PROJECT_ROOT = ( + _syncbot_dir if os.environ.get("AWS_LAMBDA_FUNCTION_NAME") else _syncbot_dir.parent +) + + +def _mysql_port() -> str: + return os.environ.get(constants.DATABASE_PORT, "3306") + + +def _pg_port() -> str: + return os.environ.get(constants.DATABASE_PORT, "5432") + + +def _build_mysql_url(include_schema: bool = False) -> tuple[str, dict]: + """Build MySQL URL and connect_args from DATABASE_* env vars.""" + host = os.environ[constants.DATABASE_HOST] + user = quote_plus(os.environ[constants.DATABASE_USER]) + passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") + path = f"/{schema}" if include_schema else "" + port = _mysql_port() + db_url = f"mysql+pymysql://{user}:{passwd}@{host}:{port}{path}?charset=utf8mb4" + connect_args: dict = {} + if constants.database_tls_enabled(): + ca_path = constants.database_ssl_ca_path() + if ca_path: + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + else: + ssl_ctx = ssl.create_default_context() + connect_args["ssl"] = ssl_ctx + return db_url, connect_args + + +def _build_postgresql_url(include_schema: bool = False) -> tuple[str, dict]: + """Build PostgreSQL URL and connect_args from DATABASE_* env vars.""" + host = os.environ[constants.DATABASE_HOST] + user = quote_plus(os.environ[constants.DATABASE_USER]) + passwd = quote_plus(os.environ[constants.DATABASE_PASSWORD]) + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") + port = _pg_port() + # Target database: schema name maps to PostgreSQL database name (same as MySQL DB name). 
+ dbname = schema if include_schema else "postgres" + db_url = f"postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{dbname}" + connect_args: dict = {} + if constants.database_tls_enabled(): + ca_path = constants.database_ssl_ca_path() + connect_args["sslmode"] = "verify-full" + if ca_path and os.path.isfile(ca_path): + connect_args["sslrootcert"] = ca_path + return db_url, connect_args + + +def _network_sql_connect_args_from_url() -> dict: + """TLS connect_args when using DATABASE_URL for MySQL or PostgreSQL.""" + connect_args: dict = {} + if not constants.database_tls_enabled(): + return connect_args + backend = constants.get_database_backend() + ca_path = constants.database_ssl_ca_path() + if backend == "mysql": + if ca_path: + try: + ssl_ctx = ssl.create_default_context(cafile=ca_path) + except (OSError, ssl.SSLError): + ssl_ctx = ssl.create_default_context() + else: + ssl_ctx = ssl.create_default_context() + connect_args["ssl"] = ssl_ctx + elif backend == "postgresql": + connect_args["sslmode"] = "verify-full" + if ca_path and os.path.isfile(ca_path): + connect_args["sslrootcert"] = ca_path + return connect_args + + +def _get_database_url_and_args(schema: str = None) -> tuple[str, dict]: + """Return (url, connect_args) for the configured backend. 
Dialect-aware.""" + backend = constants.get_database_backend() + if backend == "sqlite": + url = os.environ.get(constants.DATABASE_URL) or "sqlite:///db.sqlite3" + # Ensure path is absolute for SQLite when file path is used + if url.startswith("sqlite:///") and not url.startswith("sqlite:////"): + path_part = url[10:] + if not path_part.startswith("/") and ":" not in path_part[:2]: + url = f"sqlite:///{_PROJECT_ROOT / path_part}" + connect_args = {"check_same_thread": False} + return url, connect_args + if backend == "postgresql": + if os.environ.get(constants.DATABASE_URL): + url = os.environ[constants.DATABASE_URL] + return url, _network_sql_connect_args_from_url() + return _build_postgresql_url(include_schema=True) + # mysql + if os.environ.get(constants.DATABASE_URL): + url = os.environ[constants.DATABASE_URL] + return url, _network_sql_connect_args_from_url() + return _build_mysql_url(include_schema=True) + + +def _is_sqlite(engine) -> bool: + return engine.dialect.name == "sqlite" + + +def _is_network_sql_backend() -> bool: + return constants.get_database_backend() in ("mysql", "postgresql") + + +def _ensure_database_exists() -> None: + """Create the configured database/schema if missing (MySQL or PostgreSQL).""" + backend = constants.get_database_backend() + if backend not in ("mysql", "postgresql"): + return + if os.environ.get(constants.DATABASE_URL): + return # URL already points at a database + schema = os.environ.get(constants.DATABASE_SCHEMA, "syncbot") + if backend == "mysql": + url_no_db, connect_args = _build_mysql_url(include_schema=False) + engine_no_db = create_engine(url_no_db, connect_args=connect_args, pool_pre_ping=True) + try: + with engine_no_db.begin() as conn: + conn.execute(text(f"CREATE DATABASE IF NOT EXISTS `{schema}` CHARACTER SET utf8mb4")) + finally: + engine_no_db.dispose() + return + + # postgresql: connect to maintenance DB, CREATE DATABASE if needed + url_admin, connect_args = _build_postgresql_url(include_schema=False) + safe 
= "".join(c for c in schema if c.isalnum() or c == "_") + if not safe or safe != schema: + raise ValueError(f"Invalid DATABASE_SCHEMA for PostgreSQL (use letters, digits, underscore): {schema}") + engine_admin = create_engine( + url_admin, + connect_args=connect_args, + pool_pre_ping=True, + isolation_level="AUTOCOMMIT", + ) + try: + with engine_admin.connect() as conn: + exists = conn.execute( + text("SELECT 1 FROM pg_database WHERE datname = :n"), + {"n": schema}, + ).scalar() + if exists is None: + conn.execute(text(f'CREATE DATABASE "{safe}"')) + finally: + engine_admin.dispose() + + +def _alembic_config(): + """Build Alembic config with script_location set to syncbot/db/alembic.""" + from alembic.config import Config # pyright: ignore[reportMissingImports] + config = Config() + config.set_main_option("script_location", str(_ALEMBIC_SCRIPT_LOCATION)) + return config + + +def _run_alembic_upgrade() -> None: + """Run Alembic upgrade head to apply pending migrations.""" + from alembic import command # pyright: ignore[reportMissingImports] + + config = _alembic_config() + command.upgrade(config, "head") + + +def initialize_database() -> None: + """Ensure the database exists (MySQL/PostgreSQL) and apply Alembic migrations. + + Runs ``alembic upgrade head`` so the schema matches the current revision. + """ + for attempt in range(1, _DB_INIT_MAX_ATTEMPTS + 1): + try: + _ensure_database_exists() + _run_alembic_upgrade() + return + except Exception as exc: + if attempt >= _DB_INIT_MAX_ATTEMPTS: + _logger.error( + "db_init_failed", + extra={"attempts": _DB_INIT_MAX_ATTEMPTS, "error": str(exc)}, + ) + raise + _logger.warning( + "db_init_retrying", + extra={"attempt": attempt, "max_attempts": _DB_INIT_MAX_ATTEMPTS, "error": str(exc)}, + ) + time.sleep(_DB_INIT_RETRY_SECONDS) + + +def _drop_all_tables_dialect_aware(engine) -> None: + """Drop all tables in the current schema. 
MySQL / PostgreSQL / SQLite dialect-aware.""" + if _is_sqlite(engine): + from sqlalchemy import MetaData + + meta = MetaData() + meta.reflect(bind=engine) + with engine.begin() as conn: + for table in reversed(meta.sorted_tables): + table.drop(conn, checkfirst=True) + return + if engine.dialect.name == "postgresql": + with engine.begin() as conn: + result = conn.execute( + text( + "SELECT tablename FROM pg_tables " + "WHERE schemaname = 'public' ORDER BY tablename" + ) + ) + for (table_name,) in result: + conn.execute(text(f'DROP TABLE IF EXISTS "{table_name}" CASCADE')) + return + with engine.begin() as conn: + conn.execute(text("SET FOREIGN_KEY_CHECKS = 0")) + result = conn.execute( + text( + "SELECT TABLE_NAME FROM information_schema.TABLES " + "WHERE TABLE_SCHEMA = DATABASE()" + ) + ) + for (table_name,) in result: + conn.execute(text(f"DROP TABLE IF EXISTS `{table_name}`")) + conn.execute(text("SET FOREIGN_KEY_CHECKS = 1")) + + +def drop_and_init_db() -> None: + """Empty the current schema and reinitialize via Alembic. All data is lost. + + Drops all tables dialect-aware, then runs Alembic upgrade head. + Called from the "Reset Database" UI button (gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET). + Resets GLOBAL_ENGINE and GLOBAL_SESSION so the next get_engine() uses a fresh DB. + """ + global GLOBAL_ENGINE, GLOBAL_SESSION, GLOBAL_SCHEMA + + _logger.critical( + "DB RESET: emptying schema and reinitializing via Alembic. All data will be lost." + ) + + db_url, connect_args = _get_database_url_and_args() + engine = create_engine( + db_url, + connect_args=connect_args, + poolclass=pool.NullPool if constants.get_database_backend() == "sqlite" else pool.QueuePool, + pool_pre_ping=_is_network_sql_backend(), + ) + + _drop_all_tables_dialect_aware(engine) + + engine.dispose() + + GLOBAL_ENGINE = None + GLOBAL_SESSION = None + GLOBAL_SCHEMA = None + # Recreate schema via Alembic upgrade head. 
+ initialize_database() + _logger.info("drop_and_init_db: schema emptied and reinitialized via Alembic") + + +def get_engine(echo: bool = False, schema: str = None): + """Return the global SQLAlchemy engine, creating it on first call. + + Uses QueuePool with pool_pre_ping for MySQL/PostgreSQL; NullPool for SQLite. + """ + global GLOBAL_ENGINE, GLOBAL_SCHEMA + + backend = constants.get_database_backend() + target_schema = ( + (schema or os.environ.get(constants.DATABASE_SCHEMA, "syncbot")) + if backend in ("mysql", "postgresql") + else "" + ) + cache_key = target_schema or backend + + if cache_key == GLOBAL_SCHEMA and GLOBAL_ENGINE is not None: + return GLOBAL_ENGINE + + db_url, connect_args = _get_database_url_and_args(schema=target_schema or None) + + if backend == "sqlite": + GLOBAL_ENGINE = create_engine( + db_url, + echo=echo, + poolclass=pool.NullPool, + connect_args=connect_args, + ) + else: + GLOBAL_ENGINE = create_engine( + db_url, + echo=echo, + poolclass=pool.QueuePool, + pool_size=1, + max_overflow=1, + pool_recycle=3600, + pool_pre_ping=True, + connect_args=connect_args, + ) + GLOBAL_SCHEMA = cache_key + return GLOBAL_ENGINE + + +def get_session(echo: bool = False, schema: str = None): + if GLOBAL_SESSION: + return GLOBAL_SESSION + engine = get_engine(echo=echo, schema=schema) + return sessionmaker(bind=engine)() + + +def close_session(session): + """Close the session (return the connection to the pool).""" + if session is not None: + session.close() + + +T = TypeVar("T") + + +def _with_retry(fn): + """Decorator that retries a DB operation on transient OperationalErrors. + + Relies on ``pool_pre_ping=True`` to replace stale connections between + retries. Only disposes the engine after all retries are exhausted to + avoid disrupting other in-flight queries sharing the pool. 
+ """ + + def wrapper(*args, **kwargs): + last_exc = None + for attempt in range(_MAX_RETRIES + 1): + try: + return fn(*args, **kwargs) + except OperationalError as exc: + last_exc = exc + if attempt < _MAX_RETRIES: + _logger.warning(f"DB operation {fn.__name__} failed (attempt {attempt + 1}), retrying: {exc}") + else: + _logger.error(f"DB operation {fn.__name__} failed after {_MAX_RETRIES + 1} attempts") + global GLOBAL_ENGINE + if GLOBAL_ENGINE is not None: + GLOBAL_ENGINE.dispose() + raise last_exc + + wrapper.__name__ = fn.__name__ + return wrapper + + +class DbManager: + @staticmethod + @_with_retry + def get_record(cls: T, id, schema=None) -> T: + session = get_session(schema=schema) + try: + x = session.query(cls).filter(cls.get_id() == id).first() + if x: + session.expunge(x) + return x + finally: + session.rollback() + close_session(session) + + @staticmethod + @_with_retry + def find_records(cls: T, filters, schema=None) -> list[T]: + session = get_session(schema=schema) + try: + records = session.query(cls).filter(and_(*filters)).all() + for r in records: + session.expunge(r) + return records + finally: + session.rollback() + close_session(session) + + @staticmethod + @_with_retry + def count_records(cls: T, filters, schema=None) -> int: + session = get_session(schema=schema) + try: + return session.query(func.count(cls.id)).filter(and_(*filters)).scalar() or 0 + finally: + session.rollback() + close_session(session) + + @staticmethod + @_with_retry + def find_join_records2(left_cls: T, right_cls: T, filters, schema=None) -> list[tuple[T]]: + session = get_session(schema=schema) + try: + records = session.query(left_cls, right_cls).join(right_cls).filter(and_(*filters)).all() + session.expunge_all() + return records + finally: + session.rollback() + close_session(session) + + @staticmethod + @_with_retry + def find_join_records3( + left_cls: T, right_cls1: T, right_cls2: T, filters, schema=None, left_join=False + ) -> list[tuple[T]]: + session = 
get_session(schema=schema) + try: + records = ( + session.query(left_cls, right_cls1, right_cls2) + .select_from(left_cls) + .join(right_cls1, isouter=left_join) + .join(right_cls2, isouter=left_join) + .filter(and_(*filters)) + .all() + ) + session.expunge_all() + return records + finally: + session.rollback() + close_session(session) + + @staticmethod + @_with_retry + def update_record(cls: T, id, fields, schema=None): + session = get_session(schema=schema) + try: + session.query(cls).filter(cls.get_id() == id).update(fields, synchronize_session="fetch") + session.flush() + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + + @staticmethod + @_with_retry + def update_records(cls: T, filters, fields, schema=None): + session = get_session(schema=schema) + try: + session.query(cls).filter(and_(*filters)).update(fields, synchronize_session="fetch") + session.flush() + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + + @staticmethod + @_with_retry + def create_record(record: BaseClass, schema=None) -> BaseClass: + session = get_session(schema=schema) + try: + session.add(record) + session.flush() + session.expunge(record) + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + return record + + @staticmethod + @_with_retry + def merge_record(record: BaseClass, schema=None) -> BaseClass: + """Insert or update a record based on its primary key.""" + session = get_session(schema=schema) + try: + merged = session.merge(record) + session.flush() + session.expunge(merged) + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + return merged + + @staticmethod + @_with_retry + def create_records(records: list[BaseClass], schema=None): + session = get_session(schema=schema) + try: + session.add_all(records) + session.flush() + session.commit() + except Exception: + 
session.rollback() + raise + finally: + close_session(session) + + @staticmethod + @_with_retry + def delete_record(cls: T, id, schema=None): + session = get_session(schema=schema) + try: + session.query(cls).filter(cls.get_id() == id).delete() + session.flush() + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + + @staticmethod + @_with_retry + def delete_records(cls: T, filters, schema=None): + session = get_session(schema=schema) + try: + session.query(cls).filter(and_(*filters)).delete() + session.flush() + session.commit() + except Exception: + session.rollback() + raise + finally: + close_session(session) + diff --git a/syncbot/db/alembic/env.py b/syncbot/db/alembic/env.py new file mode 100644 index 0000000..0ff5e25 --- /dev/null +++ b/syncbot/db/alembic/env.py @@ -0,0 +1,66 @@ +"""Alembic env: use SyncBot's engine from db.get_engine(). + +Run from repo root: ``alembic -c alembic.ini upgrade head`` +(with ``syncbot/`` on ``PYTHONPATH`` via ``prepend_sys_path`` in alembic.ini). +""" + +import sys +from pathlib import Path + +# syncbot/db/alembic/env.py -> syncbot/ (directory that must be on PYTHONPATH for ``import db``) +_SYNCBOT_DIR = Path(__file__).resolve().parent.parent.parent +_REPO_ROOT = _SYNCBOT_DIR.parent +if str(_SYNCBOT_DIR) not in sys.path: + sys.path.insert(0, str(_SYNCBOT_DIR)) + +# Load .env when running via CLI (alembic upgrade head) +try: + from dotenv import load_dotenv + + load_dotenv(_REPO_ROOT / ".env") +except ImportError: + pass + +from logging.config import fileConfig # noqa: E402 + +from alembic import context # noqa: E402 + +from db import get_engine # noqa: E402 + +config = context.config +if config.config_file_name is not None: + fileConfig(config.config_file_name) + +# Use SyncBot's engine (from env vars / DATABASE_URL). Do not use sqlalchemy.url from alembic.ini. 
+target_metadata = None + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode.""" + url = get_engine().url + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online() -> None: + """Run migrations in 'online' mode.""" + connectable = get_engine() + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=target_metadata, + ) + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/syncbot/db/alembic/script.py.mako b/syncbot/db/alembic/script.py.mako new file mode 100644 index 0000000..fbc4b07 --- /dev/null +++ b/syncbot/db/alembic/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/syncbot/db/alembic/versions/001_baseline.py b/syncbot/db/alembic/versions/001_baseline.py new file mode 100644 index 0000000..eeec36e --- /dev/null +++ b/syncbot/db/alembic/versions/001_baseline.py @@ -0,0 +1,89 @@ +"""Baseline schema (all app tables + Slack OAuth tables). Supports MySQL and SQLite. 
+ +Revision ID: 001_baseline +Revises: +Create Date: Baseline from ORM models + OAuth tables + +""" +from collections.abc import Sequence + +import sqlalchemy as sa +from alembic import op + +from db.schemas import BaseClass + +revision: str = "001_baseline" +down_revision: str | None = None +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + bind = op.get_bind() + BaseClass.metadata.create_all(bind) + + # Slack SDK OAuth tables (not in our ORM; dialect-neutral schema) + op.create_table( + "slack_bots", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("client_id", sa.String(32), nullable=False), + sa.Column("app_id", sa.String(32), nullable=False), + sa.Column("enterprise_id", sa.String(32), nullable=True), + sa.Column("enterprise_name", sa.String(200), nullable=True), + sa.Column("team_id", sa.String(32), nullable=True), + sa.Column("team_name", sa.String(200), nullable=True), + sa.Column("bot_token", sa.String(200), nullable=True), + sa.Column("bot_id", sa.String(32), nullable=True), + sa.Column("bot_user_id", sa.String(32), nullable=True), + sa.Column("bot_scopes", sa.String(1000), nullable=True), + sa.Column("bot_refresh_token", sa.String(200), nullable=True), + sa.Column("bot_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("is_enterprise_install", sa.Boolean(), nullable=False, server_default=sa.false()), + sa.Column("installed_at", sa.DateTime(), nullable=False, server_default=sa.text("CURRENT_TIMESTAMP")), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "slack_installations", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("client_id", sa.String(32), nullable=False), + sa.Column("app_id", sa.String(32), nullable=False), + sa.Column("enterprise_id", sa.String(32), nullable=True), + sa.Column("enterprise_name", sa.String(200), nullable=True), + sa.Column("enterprise_url", sa.String(200), 
nullable=True), + sa.Column("team_id", sa.String(32), nullable=True), + sa.Column("team_name", sa.String(200), nullable=True), + sa.Column("bot_token", sa.String(200), nullable=True), + sa.Column("bot_id", sa.String(32), nullable=True), + sa.Column("bot_user_id", sa.String(32), nullable=True), + sa.Column("bot_scopes", sa.String(1000), nullable=True), + sa.Column("bot_refresh_token", sa.String(200), nullable=True), + sa.Column("bot_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("user_id", sa.String(32), nullable=False), + sa.Column("user_token", sa.String(200), nullable=True), + sa.Column("user_scopes", sa.String(1000), nullable=True), + sa.Column("user_refresh_token", sa.String(200), nullable=True), + sa.Column("user_token_expires_at", sa.DateTime(), nullable=True), + sa.Column("incoming_webhook_url", sa.String(200), nullable=True), + sa.Column("incoming_webhook_channel", sa.String(200), nullable=True), + sa.Column("incoming_webhook_channel_id", sa.String(200), nullable=True), + sa.Column("incoming_webhook_configuration_url", sa.String(200), nullable=True), + sa.Column("is_enterprise_install", sa.Boolean(), nullable=False, server_default=sa.false()), + sa.Column("token_type", sa.String(32), nullable=True), + sa.Column("installed_at", sa.DateTime(), nullable=False, server_default=sa.text("CURRENT_TIMESTAMP")), + sa.PrimaryKeyConstraint("id"), + ) + op.create_table( + "slack_oauth_states", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("state", sa.String(200), nullable=False), + sa.Column("expire_at", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + + +def downgrade() -> None: + op.drop_table("slack_oauth_states") + op.drop_table("slack_installations") + op.drop_table("slack_bots") + bind = op.get_bind() + BaseClass.metadata.drop_all(bind) diff --git a/syncbot/db/schemas.py b/syncbot/db/schemas.py new file mode 100644 index 0000000..2f2f29c --- /dev/null +++ b/syncbot/db/schemas.py @@ -0,0 +1,225 
@@ +"""SQLAlchemy ORM models for the SyncBot database. + +Tables: + +* **workspaces** — One row per Slack workspace that has installed SyncBot. +* **workspace_groups** — Named groups of workspaces that can sync channels. +* **workspace_group_members** — Membership records linking workspaces to groups. +* **syncs** — Named sync groups (e.g. "East Coast AOs"). +* **sync_channels** — Links a Slack channel to a sync group via its workspace. + Supports soft deletes via ``deleted_at``. +* **post_meta** — Maps each synced message to its channel-specific + timestamp so edits, deletes, and thread replies can be propagated. +* **user_directory** — Cached copy of each workspace's user profiles, + used for cross-workspace name-based matching. +* **user_mappings** — Cross-workspace user match results (including + confirmed matches, name-based matches, manual admin matches, and + explicit "no match" records to avoid redundant lookups). +""" + +from typing import Any + +from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text +from sqlalchemy.orm import declarative_base, relationship +from sqlalchemy.types import DECIMAL + +BaseClass = declarative_base() + + +class GetDBClass: + """Mixin providing helper accessors for ORM model classes.""" + + _column_keys: frozenset[str] | None = None + + @classmethod + def _get_column_keys(cls) -> frozenset[str]: + if cls._column_keys is None: + cls._column_keys = frozenset(c.key for c in cls.__table__.columns) + return cls._column_keys + + def get_id(self) -> Any: + return self.id + + def get(self, attr: str) -> Any: + if attr in self._get_column_keys(): + return getattr(self, attr) + return None + + def to_json(self) -> dict[str, Any]: + return {key: getattr(self, key) for key in self._get_column_keys()} + + def __repr__(self) -> str: + return str(self.to_json()) + + +class Workspace(BaseClass, GetDBClass): + __tablename__ = "workspaces" + id = Column(Integer, primary_key=True) + team_id = Column(String(100), unique=True) + 
workspace_name = Column(String(100)) + bot_token = Column(String(256)) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return Workspace.team_id + + +class WorkspaceGroup(BaseClass, GetDBClass): + """A named group of workspaces that can sync channels together.""" + + __tablename__ = "workspace_groups" + id = Column(Integer, primary_key=True) + name = Column(String(100), nullable=False) + invite_code = Column(String(20), unique=True, nullable=False) + status = Column(String(20), nullable=False, default="active") + created_at = Column(DateTime, nullable=False) + created_by_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=False) + + def get_id(): + return WorkspaceGroup.id + + +class WorkspaceGroupMember(BaseClass, GetDBClass): + """Membership record linking a workspace (or federated workspace) to a group.""" + + __tablename__ = "workspace_group_members" + id = Column(Integer, primary_key=True) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=False) + workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + federated_workspace_id = Column(Integer, ForeignKey("federated_workspaces.id"), nullable=True) + status = Column(String(20), nullable=False, default="active") + role = Column(String(20), nullable=False, default="member") + joined_at = Column(DateTime, nullable=True) + deleted_at = Column(DateTime, nullable=True, default=None) + dm_messages = Column(Text, nullable=True) + invited_by_slack_user_id = Column(String(32), nullable=True) + invited_by_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + + group = relationship("WorkspaceGroup", backref="members") + workspace = relationship( + "Workspace", + backref="group_memberships", + foreign_keys=[workspace_id], + ) + + def get_id(): + return WorkspaceGroupMember.id + + +class Sync(BaseClass, GetDBClass): + __tablename__ = "syncs" + id = Column(Integer, primary_key=True) + title = Column(String(100)) + 
description = Column(String(100)) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=True) + sync_mode = Column(String(20), nullable=False, default="group") + target_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + publisher_workspace_id = Column(Integer, ForeignKey("workspaces.id"), nullable=True) + + def get_id(): + return Sync.id + + +class SyncChannel(BaseClass, GetDBClass): + __tablename__ = "sync_channels" + id = Column(Integer, primary_key=True) + sync_id = Column(Integer, ForeignKey("syncs.id")) + workspace_id = Column(Integer, ForeignKey("workspaces.id")) + workspace = relationship("Workspace", backref="sync_channels") + channel_id = Column(String(100)) + status = Column(String(20), nullable=False, default="active") + created_at = Column(DateTime, nullable=False) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return SyncChannel.channel_id + + +class PostMeta(BaseClass, GetDBClass): + __tablename__ = "post_meta" + id = Column(Integer, primary_key=True) + post_id = Column(String(100)) + sync_channel_id = Column(Integer, ForeignKey("sync_channels.id")) + ts = Column(DECIMAL(16, 6)) + + def get_id(): + return PostMeta.post_id + + +class UserDirectory(BaseClass, GetDBClass): + """Cached user profile from a Slack workspace, used for name matching.""" + + __tablename__ = "user_directory" + id = Column(Integer, primary_key=True) + workspace_id = Column(Integer, ForeignKey("workspaces.id")) + slack_user_id = Column(String(100), nullable=False) + email = Column(String(320), nullable=True) + real_name = Column(String(200), nullable=True) + display_name = Column(String(200), nullable=True) + normalized_name = Column(String(200), nullable=True) + updated_at = Column(DateTime, nullable=False) + deleted_at = Column(DateTime, nullable=True, default=None) + + def get_id(): + return UserDirectory.id + + +class UserMapping(BaseClass, GetDBClass): + """Cross-workspace user match result (or 
explicit no-match).""" + + __tablename__ = "user_mappings" + id = Column(Integer, primary_key=True) + source_workspace_id = Column(Integer, ForeignKey("workspaces.id")) + source_user_id = Column(String(100), nullable=False) + target_workspace_id = Column(Integer, ForeignKey("workspaces.id")) + target_user_id = Column(String(100), nullable=True) + match_method = Column(String(20), nullable=False, default="none") + source_display_name = Column(String(200), nullable=True) + matched_at = Column(DateTime, nullable=False) + group_id = Column(Integer, ForeignKey("workspace_groups.id"), nullable=True) + + def get_id(): + return UserMapping.id + + +class InstanceKey(BaseClass, GetDBClass): + """This instance's Ed25519 keypair, auto-generated on first boot. + + The private key is stored Fernet-encrypted using TOKEN_ENCRYPTION_KEY. + The public key is shared with federated workspaces during connection setup. + """ + + __tablename__ = "instance_keys" + id = Column(Integer, primary_key=True) + public_key = Column(Text, nullable=False) + private_key_encrypted = Column(Text, nullable=False) + created_at = Column(DateTime, nullable=False) + + def get_id(): + return InstanceKey.id + + +class FederatedWorkspace(BaseClass, GetDBClass): + """A remote SyncBot instance that this instance can communicate with. + + Each federated workspace has a unique ``instance_id`` (UUID), a + ``webhook_url`` for pushing events, and a ``public_key`` (Ed25519 PEM) + used to verify inbound request signatures. + ``primary_team_id`` and ``primary_workspace_name`` are optional and set + when the connection is from a workspace that migrated to the remote instance. 
+ """ + + __tablename__ = "federated_workspaces" + id = Column(Integer, primary_key=True) + instance_id = Column(String(64), unique=True, nullable=False) + webhook_url = Column(String(500), nullable=False) + public_key = Column(Text, nullable=False) + status = Column(String(20), nullable=False, default="active") + name = Column(String(200), nullable=True) + primary_team_id = Column(String(100), nullable=True) + primary_workspace_name = Column(String(100), nullable=True) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=True) + + def get_id(): + return FederatedWorkspace.id diff --git a/syncbot/federation/__init__.py b/syncbot/federation/__init__.py new file mode 100644 index 0000000..967fb35 --- /dev/null +++ b/syncbot/federation/__init__.py @@ -0,0 +1,58 @@ +"""Cross-instance federation for SyncBot. + +Re-exports public API from :mod:`federation.core` and +:mod:`federation.api` so callers can use ``import federation`` +and access all federation functions directly. 
+""" + +from federation.core import ( + FEDERATION_USER_AGENT, + build_delete_payload, + build_edit_payload, + build_message_payload, + build_reaction_payload, + federation_sign, + federation_verify, + generate_federation_code, + get_instance_id, + get_or_create_federated_workspace, + get_or_create_instance_keypair, + get_public_url, + initiate_federation_connect, + parse_federation_code, + ping_federated_workspace, + push_delete, + push_edit, + push_message, + push_reaction, + push_users, + sign_body, + validate_webhook_url, + verify_body, +) + +__all__ = [ + "FEDERATION_USER_AGENT", + "build_delete_payload", + "build_edit_payload", + "build_message_payload", + "build_reaction_payload", + "federation_sign", + "federation_verify", + "generate_federation_code", + "get_instance_id", + "get_or_create_federated_workspace", + "get_or_create_instance_keypair", + "get_public_url", + "initiate_federation_connect", + "parse_federation_code", + "ping_federated_workspace", + "push_delete", + "push_edit", + "push_message", + "push_reaction", + "push_users", + "sign_body", + "validate_webhook_url", + "verify_body", +] diff --git a/syncbot/federation/api.py b/syncbot/federation/api.py new file mode 100644 index 0000000..91fd017 --- /dev/null +++ b/syncbot/federation/api.py @@ -0,0 +1,743 @@ +"""Federation API request handlers. + +These handlers process incoming HTTP requests from remote SyncBot instances. +They are called by the federation HTTP server (local dev) or the Lambda +handler (production) and return ``(status_code, response_dict)`` tuples. + +All federation endpoints require the ``SyncBot-Federation`` User-Agent; +requests without it receive an opaque 404, making the endpoints invisible +to scanners. 
+ +Endpoints: + +* ``POST /api/federation/pair`` -- Accept an incoming connection request +* ``POST /api/federation/message`` -- Receive a forwarded message +* ``POST /api/federation/message/edit`` -- Receive a message edit +* ``POST /api/federation/message/delete`` -- Receive a message delete +* ``POST /api/federation/message/react`` -- Receive a reaction +* ``POST /api/federation/users`` -- Exchange user directory +* ``GET /api/federation/ping`` -- Health check +""" + +import json +import logging +import re +from datetime import UTC, datetime + +from slack_sdk.errors import SlackApiError +from slack_sdk.web import WebClient + +import constants +import helpers +from db import DbManager, schemas +from federation import core as federation + +_logger = logging.getLogger(__name__) + +_NOT_FOUND = (404, {"message": "Not Found"}) + + +def _find_post_records(post_id: str, sync_channel_id: int) -> list[schemas.PostMeta]: + """Look up PostMeta records for a given post_id + sync channel.""" + pid = post_id if isinstance(post_id, bytes) else post_id.encode()[:100] + return DbManager.find_records( + schemas.PostMeta, + [schemas.PostMeta.post_id == pid, schemas.PostMeta.sync_channel_id == sync_channel_id], + ) + +_PAIRING_CODE_RE = re.compile(r"^FED-[0-9A-Fa-f]{8}$") + +_FIELD_MAX_LENGTHS = { + "channel_id": 20, + "text": 40_000, + "post_id": 100, + "reaction": 100, + "instance_id": 64, + "webhook_url": 500, + "code": 20, + "action": 10, +} + + +# --------------------------------------------------------------------------- +# Input validation helper +# --------------------------------------------------------------------------- + + +def _validate_fields(body: dict, required: list[str], extras: list[str] | None = None) -> str | None: + """Check required fields are present, non-empty, and within length limits. + + Returns an error string on failure, or *None* if valid. 
+ """ + for field in required: + val = body.get(field) + if val is None or (isinstance(val, str) and not val.strip()): + return f"missing_{field}" + + for field in required + (extras or []): + val = body.get(field) + max_len = _FIELD_MAX_LENGTHS.get(field) + if max_len and isinstance(val, str) and len(val) > max_len: + return f"{field}_too_long" + + return None + + +def _pick_user_mapping_for_federated_target(source_user_id: str, target_workspace_id: int) -> schemas.UserMapping | None: + maps = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.target_workspace_id == target_workspace_id, + schemas.UserMapping.source_user_id == source_user_id, + ], + ) + if not maps: + return None + for m in maps: + if m.target_user_id: + return m + return maps[0] + + +def _resolve_mentions_for_federated(msg_text: str, target_workspace_id: int, remote_workspace_label: str) -> str: + """Replace ``<@U_REMOTE>`` with native local mentions using *UserMapping* / *UserDirectory* on this instance.""" + if not msg_text: + return msg_text + + user_ids = re.findall(r"<@(\w+)>", msg_text) + if not user_ids: + return msg_text + + for uid in dict.fromkeys(user_ids): + mapping = _pick_user_mapping_for_federated_target(uid, target_workspace_id) + if mapping and mapping.target_user_id: + rep = f"<@{mapping.target_user_id}>" + elif mapping and mapping.source_display_name: + rep = f"`[@{mapping.source_display_name} ({remote_workspace_label})]`" + else: + display: str | None = None + for entry in DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.slack_user_id == uid, schemas.UserDirectory.deleted_at.is_(None)], + ): + display = entry.display_name or entry.real_name + if display: + break + if display: + rep = f"`[@{display} ({remote_workspace_label})]`" + else: + rep = f"`[@{uid} ({remote_workspace_label})]`" + msg_text = re.sub(rf"<@{re.escape(uid)}>", rep, msg_text) + + return msg_text + + +# 
--------------------------------------------------------------------------- +# Authentication helpers +# --------------------------------------------------------------------------- + + +def _has_federation_user_agent(headers: dict) -> bool: + ua = headers.get("User-Agent", "") or headers.get("user-agent", "") + return "SyncBot-Federation" in ua + + +def _verify_federated_request(body_str: str, headers: dict) -> schemas.FederatedWorkspace | None: + """Verify the Ed25519 signature on an incoming federation request. + + Returns the :class:`FederatedWorkspace` record if valid, or *None*. + """ + sig = headers.get("X-Federation-Signature", "") + ts = headers.get("X-Federation-Timestamp", "") + instance_id = headers.get("X-Federation-Instance", "") + + if not sig or not ts or not instance_id: + return None + + matches = DbManager.find_records( + schemas.FederatedWorkspace, + [schemas.FederatedWorkspace.instance_id == instance_id], + ) + fed_ws = matches[0] if matches else None + if not fed_ws or fed_ws.status != "active": + return None + + if not federation.federation_verify(body_str, sig, ts, fed_ws.public_key): + _logger.warning( + "federation_auth_failed — remote workspace may have regenerated its keypair; reconnection required", + extra={"instance_id": instance_id}, + ) + return None + + return fed_ws + + +# --------------------------------------------------------------------------- +# Channel access scoping +# --------------------------------------------------------------------------- + + +def _federated_has_channel_access(fed_ws: schemas.FederatedWorkspace, sync_channel: schemas.SyncChannel) -> bool: + """Return *True* if *fed_ws* is authorised to interact with *sync_channel*. + + The federated workspace must be linked to the sync's group via a + WorkspaceGroupMember whose ``federated_workspace_id`` matches. 
+ """ + sync = DbManager.get_record(schemas.Sync, id=sync_channel.sync_id) + if not sync or not sync.group_id: + return False + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == sync.group_id, + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + return bool(fed_members) + + +def _resolve_channel_for_federated( + channel_id: str, + fed_ws: schemas.FederatedWorkspace, + *, + require_active: bool = False, +) -> tuple[schemas.SyncChannel, schemas.Workspace] | None: + """Look up a sync channel, verify federated access, and return the workspace. + + Returns ``(sync_channel, workspace)`` or *None* if any check fails. + """ + filters = [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ] + if require_active: + filters.append(schemas.SyncChannel.status == "active") + + records = DbManager.find_records(schemas.SyncChannel, filters) + if not records: + return None + + sync_channel = records[0] + if not _federated_has_channel_access(fed_ws, sync_channel): + return None + + workspace = helpers.get_workspace_by_id(sync_channel.workspace_id) + if not workspace or not workspace.bot_token: + return None + + return sync_channel, workspace + + +def _get_local_workspace_ids(fed_ws: schemas.FederatedWorkspace) -> set[int]: + """Return local workspace IDs that participate in groups shared with *fed_ws*.""" + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + ws_ids: set[int] = set() + for fed_member in fed_members: + group_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == 
fed_member.group_id, + schemas.WorkspaceGroupMember.workspace_id.isnot(None), + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in group_members: + if m.workspace_id: + ws_ids.add(m.workspace_id) + return ws_ids + + +# --------------------------------------------------------------------------- +# POST /api/federation/pair +# --------------------------------------------------------------------------- + + +def handle_pair(body: dict, body_str: str, headers: dict) -> tuple[int, dict]: + """Accept an incoming connection request from a remote instance. + + The remote instance sends its ``code``, ``webhook_url``, ``instance_id``, + and ``public_key``. The request must be signed with the sender's private + key so we can verify it matches the included public key. + """ + err = _validate_fields(body, ["code", "webhook_url", "instance_id", "public_key"]) + if err: + return 400, {"error": err} + + code = body["code"] + remote_url = body["webhook_url"] + remote_instance_id = body["instance_id"] + remote_public_key = body["public_key"] + + if not _PAIRING_CODE_RE.match(code): + return 400, {"error": "invalid_code_format"} + + if not federation.validate_webhook_url(remote_url): + return 400, {"error": "invalid_webhook_url"} + + sig = headers.get("X-Federation-Signature", "") + ts = headers.get("X-Federation-Timestamp", "") + if not sig or not ts: + return 401, {"error": "missing_signature"} + + if not federation.federation_verify(body_str, sig, ts, remote_public_key): + return 401, {"error": "invalid_signature"} + + groups = DbManager.find_records( + schemas.WorkspaceGroup, + [schemas.WorkspaceGroup.invite_code == code, schemas.WorkspaceGroup.status == "active"], + ) + if not groups: + return _NOT_FOUND + group = groups[0] + + existing_fed = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + 
schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for m in existing_fed: + if m.federated_workspace_id: + fed_ws_check = DbManager.get_record(schemas.FederatedWorkspace, id=m.federated_workspace_id) + if fed_ws_check and fed_ws_check.instance_id == remote_instance_id: + return 409, {"error": "already_connected"} + + fed_ws_name = f"Connection {remote_instance_id[:8]}" + _team_id = body.get("team_id") + primary_team_id = _team_id.strip() if isinstance(_team_id, str) and _team_id.strip() else None + primary_workspace_name = body.get("workspace_name") if isinstance(body.get("workspace_name"), str) else None + + fed_ws = federation.get_or_create_federated_workspace( + instance_id=remote_instance_id, + webhook_url=remote_url, + public_key=remote_public_key, + name=fed_ws_name, + primary_team_id=primary_team_id, + primary_workspace_name=primary_workspace_name, + ) + + now = datetime.now(UTC) + member = schemas.WorkspaceGroupMember( + group_id=group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(member) + + # Instance A detection: if the connecting side sent team_id, soft-delete the matching local workspace + if primary_team_id: + local_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.team_id == primary_team_id], + ) + if local_workspaces: + local_ws = local_workspaces[0] + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == local_ws.id], + {schemas.Workspace.deleted_at: now}, + ) + _logger.info( + "federation_local_workspace_soft_deleted", + extra={"team_id": primary_team_id, "workspace_id": local_ws.id}, + ) + + _, our_public_key = federation.get_or_create_instance_keypair() + + _logger.info( + "federation_connection_accepted", + extra={ + "group_id": group.id, + "remote_instance": remote_instance_id, + }, + ) + + return 200, { + "ok": True, + "instance_id": federation.get_instance_id(), + "public_key": our_public_key, + "group_id": 
group.id, + } + + +# --------------------------------------------------------------------------- +# POST /api/federation/message +# --------------------------------------------------------------------------- + + +def handle_message(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and post a forwarded message from a federated workspace.""" + err = _validate_fields(body, ["channel_id"], extras=["text", "post_id"]) + if err: + return 400, {"error": err} + + channel_id = body["channel_id"] + text = body.get("text", "") + user = body.get("user", {}) + post_id = body.get("post_id", "") + thread_post_id = body.get("thread_post_id") + images = body.get("images", [])[:10] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws, require_active=True) + if not resolved: + return _NOT_FOUND + sync_channel, workspace = resolved + + user_name = user.get("display_name", "Remote User") + user_avatar = user.get("avatar_url") + workspace_name = user.get("workspace_name", "Remote") + + text = _resolve_mentions_for_federated(text, workspace.id, workspace_name) + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + text = helpers.resolve_channel_references(text, ws_client, None, target_workspace_id=workspace.id) + + try: + thread_ts = None + if thread_post_id: + post_records = DbManager.find_records( + schemas.PostMeta, + [ + schemas.PostMeta.post_id == thread_post_id, + schemas.PostMeta.sync_channel_id == sync_channel.id, + ], + ) + if post_records: + thread_ts = str(post_records[0].ts) + + photo_blocks = [] + if images: + for img in images: + photo_blocks.append( + { + "type": "image", + "image_url": img.get("url", ""), + "alt_text": img.get("alt_text", "Shared image"), + } + ) + + res = helpers.post_message( + bot_token=helpers.decrypt_bot_token(workspace.bot_token), + channel_id=channel_id, + msg_text=text, + user_name=user_name, + user_profile_url=user_avatar, + workspace_name=workspace_name, + blocks=photo_blocks 
if photo_blocks else None, + thread_ts=thread_ts, + ) + + ts = helpers.safe_get(res, "ts") + + if post_id and ts: + post_meta = schemas.PostMeta( + post_id=post_id if isinstance(post_id, bytes) else post_id.encode()[:100], + sync_channel_id=sync_channel.id, + ts=float(ts), + ) + DbManager.create_record(post_meta) + + _logger.info( + "federation_message_received", + extra={"channel_id": channel_id, "remote": fed_ws.instance_id}, + ) + + return 200, {"ok": True, "ts": ts} + + except Exception: + _logger.exception("federation_message_error", extra={"channel_id": channel_id}) + return 500, {"error": "internal_error"} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/edit +# --------------------------------------------------------------------------- + + +def handle_message_edit(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and apply a message edit from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id"], extras=["text"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + text = body.get("text", "") + channel_id = body["channel_id"] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sync_channel, workspace = resolved + + remote_label = fed_ws.primary_workspace_name or fed_ws.name or "Remote" + text = _resolve_mentions_for_federated(text, workspace.id, remote_label) + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + text = helpers.resolve_channel_references(text, ws_client, None, target_workspace_id=workspace.id) + + post_records = _find_post_records(post_id, sync_channel.id) + + updated = 0 + for post_meta in post_records: + try: + ws_client.chat_update(channel=channel_id, ts=str(post_meta.ts), text=text) + updated += 1 + except Exception: + _logger.warning("federation_edit_failed", extra={"channel_id": channel_id, "ts": 
str(post_meta.ts)}) + + return 200, {"ok": True, "updated": updated} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/delete +# --------------------------------------------------------------------------- + + +def handle_message_delete(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and apply a message deletion from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + channel_id = body["channel_id"] + + resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sync_channel, workspace = resolved + + post_records = _find_post_records(post_id, sync_channel.id) + + deleted = 0 + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + for post_meta in post_records: + try: + ws_client.chat_delete(channel=channel_id, ts=str(post_meta.ts)) + deleted += 1 + except Exception: + _logger.warning("federation_delete_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) + + return 200, {"ok": True, "deleted": deleted} + + +# --------------------------------------------------------------------------- +# POST /api/federation/message/react +# --------------------------------------------------------------------------- + + +def handle_message_react(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Receive and apply a reaction add/remove from a federated workspace.""" + err = _validate_fields(body, ["post_id", "channel_id", "reaction"], extras=["action"]) + if err: + return 400, {"error": err} + + post_id = body["post_id"] + channel_id = body["channel_id"] + reaction = body["reaction"] + action = body.get("action", "add") + user_name = body.get("user_name") or "Remote User" + user_avatar_url = body.get("user_avatar_url") + workspace_name = body.get("workspace_name") or "Remote" + + 
resolved = _resolve_channel_for_federated(channel_id, fed_ws) + if not resolved: + return _NOT_FOUND + sync_channel, workspace = resolved + + post_records = _find_post_records(post_id, sync_channel.id) + + applied = 0 + bot_token = helpers.decrypt_bot_token(workspace.bot_token) + ws_client = WebClient(token=bot_token) + for post_meta in post_records: + try: + if action == "add": + ws_client.reactions_add(channel=channel_id, timestamp=str(post_meta.ts), name=reaction) + else: + ws_client.reactions_remove(channel=channel_id, timestamp=str(post_meta.ts), name=reaction) + applied += 1 + except SlackApiError as exc: + error_code = "" + if exc.response: + if isinstance(exc.response, dict): + error_code = str(exc.response.get("error") or "") + else: + error_code = str(getattr(exc.response, "get", lambda _k, _d=None: "")("error", "")) + + if action == "add" and error_code == "invalid_name": + try: + helpers.post_message( + bot_token=bot_token, + channel_id=channel_id, + msg_text=f"reacted with :{reaction}:", + user_name=user_name, + user_profile_url=user_avatar_url, + workspace_name=workspace_name, + thread_ts=str(post_meta.ts), + ) + applied += 1 + continue + except Exception: + _logger.warning( + "federation_react_fallback_failed", + extra={"channel_id": channel_id, "ts": str(post_meta.ts)}, + ) + + _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) + except Exception: + _logger.warning("federation_react_failed", extra={"channel_id": channel_id, "ts": str(post_meta.ts)}) + + return 200, {"ok": True, "applied": applied} + + +# --------------------------------------------------------------------------- +# POST /api/federation/users +# --------------------------------------------------------------------------- + + +def handle_users(body: dict, fed_ws: schemas.FederatedWorkspace) -> tuple[int, dict]: + """Exchange user directory with a federated workspace. 
+ + Only returns users from workspaces that share groups with this federated workspace. + """ + remote_users = body.get("users", [])[:5000] + workspace_id = body.get("workspace_id") + + if remote_users and workspace_id: + now = datetime.now(UTC) + for u in remote_users: + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.slack_user_id == u.get("user_id", ""), + ], + ) + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: u.get("email"), + schemas.UserDirectory.real_name: u.get("real_name"), + schemas.UserDirectory.display_name: u.get("display_name"), + schemas.UserDirectory.updated_at: now, + }, + ) + else: + record = schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=u.get("user_id", ""), + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + updated_at=now, + ) + DbManager.create_record(record) + + _logger.info( + "federation_users_received", + extra={"remote": fed_ws.instance_id, "count": len(remote_users)}, + ) + + allowed_ws_ids = _get_local_workspace_ids(fed_ws) + + local_users = [] + for ws_id in allowed_ws_ids: + ws = helpers.get_workspace_by_id(ws_id) + if not ws or ws.deleted_at: + continue + users = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == ws_id, schemas.UserDirectory.deleted_at.is_(None)], + ) + for u in users: + local_users.append( + { + "user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + "workspace_id": ws_id, + } + ) + + return 200, {"ok": True, "users": local_users} + + +# --------------------------------------------------------------------------- +# GET /api/federation/ping +# --------------------------------------------------------------------------- + + +def handle_ping() -> tuple[int, dict]: + 
"""Health check -- returns instance identity.""" + return 200, { + "ok": True, + "instance_id": federation.get_instance_id(), + "timestamp": datetime.now(UTC).isoformat(), + } + + +# --------------------------------------------------------------------------- +# Request dispatcher +# --------------------------------------------------------------------------- + + +def dispatch_federation_request(method: str, path: str, body_str: str, headers: dict) -> tuple[int, dict]: + """Route an incoming federation HTTP request to the appropriate handler. + + Returns ``(status_code, response_dict)``. + + Requests without the ``SyncBot-Federation`` User-Agent receive a plain + 404 identical to API Gateway's response for non-existent paths. + """ + if not _has_federation_user_agent(headers): + return _NOT_FOUND + + if path == "/api/federation/ping" and method == "GET": + return handle_ping() + + if not constants.FEDERATION_ENABLED: + return _NOT_FOUND + + if method != "POST": + return _NOT_FOUND + + try: + body = json.loads(body_str) if body_str else {} + except json.JSONDecodeError: + return 400, {"error": "invalid_json"} + + if path == "/api/federation/pair": + return handle_pair(body, body_str, headers) + + fed_ws = _verify_federated_request(body_str, headers) + if not fed_ws: + return _NOT_FOUND + + if path == "/api/federation/message": + return handle_message(body, fed_ws) + elif path == "/api/federation/message/edit": + return handle_message_edit(body, fed_ws) + elif path == "/api/federation/message/delete": + return handle_message_delete(body, fed_ws) + elif path == "/api/federation/message/react": + return handle_message_react(body, fed_ws) + elif path == "/api/federation/users": + return handle_users(body, fed_ws) + + return _NOT_FOUND diff --git a/syncbot/federation/core.py b/syncbot/federation/core.py new file mode 100644 index 0000000..99800f4 --- /dev/null +++ b/syncbot/federation/core.py @@ -0,0 +1,680 @@ +"""Cross-instance federation for SyncBot. 
+ +Provides: + +* **Ed25519 signing and verification** of inter-instance HTTP requests. +* **Auto-generated keypair** created on first boot and stored in the DB. +* **HTTP client** for pushing events (messages, edits, deletes, reactions, + user-directory exchanges) to federated workspaces. +* **Connection code** generation and parsing (encodes webhook URL + code + + instance ID + public key). +* **Payload builders** for standardised federation message formats. +""" + +import base64 +import ipaddress +import json +import logging +import os +import secrets +import time +import uuid +from datetime import UTC, datetime +from urllib.parse import urlparse + +import requests +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey +from cryptography.hazmat.primitives.serialization import ( + Encoding, + NoEncryption, + PrivateFormat, + PublicFormat, + load_pem_private_key, + load_pem_public_key, +) + +import constants +from db import DbManager, schemas + +_logger = logging.getLogger(__name__) + +FEDERATION_USER_AGENT = "SyncBot-Federation/1.0" + +# --------------------------------------------------------------------------- +# Instance identity +# --------------------------------------------------------------------------- + +_INSTANCE_ID: str | None = None + + +def get_instance_id() -> str: + """Return a persistent UUID identifying this SyncBot instance. + + Reads from ``SYNCBOT_INSTANCE_ID`` env var. If not set, generates one + and stores it in-memory for the lifetime of the process. 
+ """ + global _INSTANCE_ID + if _INSTANCE_ID: + return _INSTANCE_ID + _INSTANCE_ID = os.environ.get("SYNCBOT_INSTANCE_ID") or str(uuid.uuid4()) + return _INSTANCE_ID + + +def get_public_url() -> str: + """Return the public base URL of this instance (no trailing slash).""" + url = os.environ.get("SYNCBOT_PUBLIC_URL", "").rstrip("/") + if not url: + _logger.warning("SYNCBOT_PUBLIC_URL is not set — federation will not work") + return url + + +# --------------------------------------------------------------------------- +# Ed25519 keypair management +# --------------------------------------------------------------------------- + +_cached_private_key = None +_cached_public_pem: str | None = None + + +def get_or_create_instance_keypair(): + """Return this instance's Ed25519 (private_key, public_key_pem). + + Auto-generates and persists the keypair on first call. The private key + is Fernet-encrypted at rest in the ``instance_keys`` table. + """ + global _cached_private_key, _cached_public_pem + if _cached_private_key and _cached_public_pem: + return _cached_private_key, _cached_public_pem + + from helpers import decrypt_bot_token, encrypt_bot_token + + existing = DbManager.find_records(schemas.InstanceKey, []) + if existing: + private_pem = decrypt_bot_token(existing[0].private_key_encrypted) + private_key = load_pem_private_key(private_pem.encode(), password=None) + _cached_private_key = private_key + _cached_public_pem = existing[0].public_key + return private_key, existing[0].public_key + + private_key = Ed25519PrivateKey.generate() + public_pem = private_key.public_key().public_bytes( + Encoding.PEM, PublicFormat.SubjectPublicKeyInfo + ).decode() + private_pem = private_key.private_bytes( + Encoding.PEM, PrivateFormat.PKCS8, NoEncryption() + ).decode() + + record = schemas.InstanceKey( + public_key=public_pem, + private_key_encrypted=encrypt_bot_token(private_pem), + created_at=datetime.now(UTC), + ) + DbManager.create_record(record) + + _cached_private_key = 
private_key + _cached_public_pem = public_pem + _logger.info("instance_keypair_generated") + return private_key, public_pem + + +# --------------------------------------------------------------------------- +# Ed25519 signing / verification +# --------------------------------------------------------------------------- + +_TIMESTAMP_MAX_AGE = 300 # 5 minutes + + +def federation_sign(body: str) -> tuple[str, str]: + """Sign *body* with this instance's Ed25519 private key. + + Returns ``(signature_b64, timestamp_str)``. + """ + private_key, _ = get_or_create_instance_keypair() + ts = str(int(time.time())) + signing_str = f"{ts}:{body}".encode() + sig = private_key.sign(signing_str) + return base64.b64encode(sig).decode(), ts + + +def federation_verify(body: str, signature_b64: str, timestamp: str, public_key_pem: str) -> bool: + """Verify an incoming federation request using the sender's public key. + + Returns *True* if the signature is valid and the timestamp is fresh. + """ + try: + ts_int = int(timestamp) + except (TypeError, ValueError): + return False + + if abs(time.time() - ts_int) > _TIMESTAMP_MAX_AGE: + _logger.warning("federation_verify: timestamp too old/future", extra={"ts": timestamp}) + return False + + try: + public_key = load_pem_public_key(public_key_pem.encode()) + signing_str = f"{timestamp}:{body}".encode() + public_key.verify(base64.b64decode(signature_b64), signing_str) + return True + except (InvalidSignature, ValueError, TypeError): + return False + + +def sign_body(body: str) -> str: + """Sign *body* only (no timestamp). Used for migration export integrity.""" + private_key, _ = get_or_create_instance_keypair() + sig = private_key.sign(body.encode()) + return base64.b64encode(sig).decode() + + +def verify_body(body: str, signature_b64: str, public_key_pem: str) -> bool: + """Verify a signature over *body* (no timestamp). 
Used for migration import.""" + try: + public_key = load_pem_public_key(public_key_pem.encode()) + public_key.verify(base64.b64decode(signature_b64), body.encode()) + return True + except (InvalidSignature, ValueError, TypeError): + return False + + +# --------------------------------------------------------------------------- +# URL validation (SSRF protection) +# --------------------------------------------------------------------------- + +_PRIVATE_NETWORKS = [ + ipaddress.ip_network("10.0.0.0/8"), + ipaddress.ip_network("172.16.0.0/12"), + ipaddress.ip_network("192.168.0.0/16"), + ipaddress.ip_network("127.0.0.0/8"), + ipaddress.ip_network("169.254.0.0/16"), + ipaddress.ip_network("::1/128"), + ipaddress.ip_network("fc00::/7"), + ipaddress.ip_network("fe80::/10"), +] + + +def validate_webhook_url(url: str) -> bool: + """Return *True* if *url* is safe to use as a federation webhook target. + + Rejects private/loopback IPs (SSRF protection) and requires HTTPS in + production. HTTP is allowed only when ``LOCAL_DEVELOPMENT`` is true. 
+ """ + if not url: + return False + + try: + parsed = urlparse(url) + except Exception: + return False + + if constants.LOCAL_DEVELOPMENT: + if parsed.scheme not in ("http", "https"): + return False + else: + if parsed.scheme != "https": + return False + + hostname = parsed.hostname + if not hostname: + return False + + import socket + try: + addr_infos = socket.getaddrinfo(hostname, None) + for info in addr_infos: + addr = ipaddress.ip_address(info[4][0]) + for net in _PRIVATE_NETWORKS: + if addr in net: + _logger.warning( + "federation_ssrf_blocked", + extra={"url": url, "resolved_ip": str(addr)}, + ) + return False + except (socket.gaierror, ValueError): + return False + + return True + + +# --------------------------------------------------------------------------- +# Connection code generation / parsing +# --------------------------------------------------------------------------- + + +def generate_federation_code(workspace_id: int, label: str | None = None) -> tuple[str, str]: + """Generate a federation connection code and create a pending group record. + + Returns ``(encoded_payload, raw_code)`` where *encoded_payload* is the + base64-encoded JSON string the admin shares with the remote instance. + The payload includes this instance's public key for signature verification. 
+ """ + raw_code = "FED-" + secrets.token_hex(4).upper() + public_url = get_public_url() + instance_id = get_instance_id() + _, public_key_pem = get_or_create_instance_keypair() + + payload = { + "code": raw_code, + "webhook_url": public_url, + "instance_id": instance_id, + "public_key": public_key_pem, + } + encoded = base64.urlsafe_b64encode(json.dumps(payload).encode()).decode() + + now = datetime.now(UTC) + group = schemas.WorkspaceGroup( + name=label or "External connection", + invite_code=raw_code, + status="active", + created_at=now, + created_by_workspace_id=workspace_id, + ) + DbManager.create_record(group) + + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(member) + + return encoded, raw_code + + +def parse_federation_code(encoded: str) -> dict | None: + """Decode a federation connection payload. + + Returns ``{"code": ..., "webhook_url": ..., "instance_id": ..., + "public_key": ...}`` or *None* if the payload is invalid. 
+ """ + try: + decoded = base64.urlsafe_b64decode(encoded.encode()).decode() + payload = json.loads(decoded) + if all(k in payload for k in ("code", "webhook_url", "instance_id")): + return payload + except Exception as exc: + _logger.debug(f"decode_federation_code: invalid payload: {exc}") + return None + + +# --------------------------------------------------------------------------- +# Federated workspace management +# --------------------------------------------------------------------------- + + +def get_or_create_federated_workspace( + instance_id: str, + webhook_url: str, + public_key: str, + name: str | None = None, + *, + primary_team_id: str | None = None, + primary_workspace_name: str | None = None, +) -> schemas.FederatedWorkspace: + """Find or create a federated workspace record.""" + matches = DbManager.find_records( + schemas.FederatedWorkspace, + [schemas.FederatedWorkspace.instance_id == instance_id], + ) + existing = matches[0] if matches else None + if existing: + update_fields = { + schemas.FederatedWorkspace.webhook_url: webhook_url, + schemas.FederatedWorkspace.public_key: public_key, + schemas.FederatedWorkspace.status: "active", + schemas.FederatedWorkspace.updated_at: datetime.now(UTC), + } + if primary_team_id is not None: + update_fields[schemas.FederatedWorkspace.primary_team_id] = primary_team_id + if primary_workspace_name is not None: + update_fields[schemas.FederatedWorkspace.primary_workspace_name] = primary_workspace_name + DbManager.update_records( + schemas.FederatedWorkspace, + [schemas.FederatedWorkspace.id == existing.id], + update_fields, + ) + return DbManager.get_record(schemas.FederatedWorkspace, existing.id) + + fed_ws = schemas.FederatedWorkspace( + instance_id=instance_id, + webhook_url=webhook_url, + public_key=public_key, + status="active", + name=name, + primary_team_id=primary_team_id, + primary_workspace_name=primary_workspace_name, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + ) + 
DbManager.create_record(fed_ws) + return DbManager.get_record(schemas.FederatedWorkspace, fed_ws.id) + + +# --------------------------------------------------------------------------- +# HTTP client — push events to a federated workspace +# --------------------------------------------------------------------------- + +_REQUEST_TIMEOUT = 15 # seconds +_MAX_RETRIES = 3 +_RETRY_BACKOFF = [1, 2, 4] # seconds between retries + + +def _federation_request( + fed_ws: schemas.FederatedWorkspace, + path: str, + payload: dict, + method: str = "POST", +) -> dict | None: + """Send an authenticated request to a federated workspace. + + Signs the request with this instance's Ed25519 private key. + Retries up to :data:`_MAX_RETRIES` times on transient failures. + """ + url = fed_ws.webhook_url.rstrip("/") + path + body = json.dumps(payload) + + start_time = time.time() + + for attempt in range(_MAX_RETRIES): + try: + sig, ts = federation_sign(body) + headers = { + "Content-Type": "application/json", + "User-Agent": FEDERATION_USER_AGENT, + "X-Federation-Signature": sig, + "X-Federation-Timestamp": ts, + "X-Federation-Instance": get_instance_id(), + } + resp = requests.request(method, url, data=body, headers=headers, timeout=_REQUEST_TIMEOUT) + elapsed = round((time.time() - start_time) * 1000, 1) + + if resp.status_code == 200: + _logger.debug( + "federation_request_ok", + extra={"url": url, "elapsed_ms": elapsed, "attempts": attempt + 1}, + ) + try: + return resp.json() + except Exception as exc: + _logger.debug(f"federation_request: non-JSON success response: {exc}") + return {"ok": True} + elif resp.status_code >= 500: + _logger.warning( + "federation_request_retry", + extra={ + "url": url, + "status": resp.status_code, + "attempt": attempt + 1, + "remote": fed_ws.instance_id, + }, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + continue + elif resp.status_code == 401: + _logger.error( + "federation_auth_rejected", + extra={ + "url": url, + 
"remote": fed_ws.instance_id, + "message": "Keypair may have changed — reconnection required", + }, + ) + return None + else: + _logger.error( + "federation_request_failed", + extra={ + "url": url, + "status": resp.status_code, + "body": resp.text[:500], + "remote": fed_ws.instance_id, + }, + ) + return None + except requests.exceptions.Timeout: + _logger.warning( + "federation_request_timeout", + extra={"url": url, "attempt": attempt + 1, "remote": fed_ws.instance_id}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except requests.exceptions.ConnectionError as e: + _logger.warning( + "federation_connection_error", + extra={"url": url, "attempt": attempt + 1, "error": str(e), "remote": fed_ws.instance_id}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except Exception as e: + _logger.error( + "federation_request_error", + extra={"url": url, "error": str(e), "remote": fed_ws.instance_id}, + ) + return None + + elapsed = round((time.time() - start_time) * 1000, 1) + _logger.error( + "federation_request_exhausted", + extra={"url": url, "elapsed_ms": elapsed, "attempts": _MAX_RETRIES, "remote": fed_ws.instance_id}, + ) + return None + + +def push_message(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message (new post, thread reply) to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message", payload) + + +def push_edit(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message edit to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/edit", payload) + + +def push_delete(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward a message deletion to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/delete", payload) + + +def push_reaction(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Forward 
a reaction add/remove to a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/message/react", payload) + + +def push_users(fed_ws: schemas.FederatedWorkspace, payload: dict) -> dict | None: + """Exchange user directory with a federated workspace.""" + return _federation_request(fed_ws, "/api/federation/users", payload) + + +def initiate_federation_connect( + remote_url: str, + code: str, + *, + team_id: str | None = None, + workspace_name: str | None = None, +) -> dict | None: + """Call the remote instance's /api/federation/pair endpoint. + + Signs the request with this instance's Ed25519 private key so the + receiver can verify we control the keypair advertised in the connection code. + Optionally sends team_id and workspace_name so the remote (Instance A) can + tag the connection and soft-delete the matching local workspace. + """ + _, public_key_pem = get_or_create_instance_keypair() + + url = remote_url.rstrip("/") + "/api/federation/pair" + payload = { + "code": code, + "webhook_url": get_public_url(), + "instance_id": get_instance_id(), + "public_key": public_key_pem, + } + if team_id: + payload["team_id"] = team_id + if workspace_name: + payload["workspace_name"] = workspace_name + body = json.dumps(payload) + sig, ts = federation_sign(body) + + for attempt in range(_MAX_RETRIES): + try: + resp = requests.post( + url, + data=body, + headers={ + "Content-Type": "application/json", + "User-Agent": FEDERATION_USER_AGENT, + "X-Federation-Signature": sig, + "X-Federation-Timestamp": ts, + "X-Federation-Instance": get_instance_id(), + }, + timeout=_REQUEST_TIMEOUT, + ) + if resp.status_code == 200: + _logger.info("federation_pair_success", extra={"url": url}) + return resp.json() + elif resp.status_code >= 500: + _logger.warning( + "federation_pair_retry", + extra={"url": url, "status": resp.status_code, "attempt": attempt + 1}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + continue + else: + _logger.error( 
+ "federation_pair_failed", + extra={"url": url, "status": resp.status_code, "body": resp.text[:500]}, + ) + return None + except requests.exceptions.ConnectionError as e: + _logger.warning( + "federation_pair_connection_error", + extra={"url": url, "attempt": attempt + 1, "error": str(e)}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except requests.exceptions.Timeout: + _logger.warning( + "federation_pair_timeout", + extra={"url": url, "attempt": attempt + 1}, + ) + if attempt < _MAX_RETRIES - 1: + time.sleep(_RETRY_BACKOFF[attempt]) + except Exception as e: + _logger.error("federation_pair_error", extra={"url": url, "error": str(e)}) + return None + + _logger.error("federation_pair_exhausted", extra={"url": url, "attempts": _MAX_RETRIES}) + return None + + +def ping_federated_workspace(fed_ws: schemas.FederatedWorkspace) -> bool: + """Check if a federated workspace is reachable.""" + url = fed_ws.webhook_url.rstrip("/") + "/api/federation/ping" + try: + resp = requests.get( + url, + headers={"User-Agent": FEDERATION_USER_AGENT}, + timeout=5, + ) + return resp.status_code == 200 + except Exception as exc: + _logger.debug(f"ping_federated_workspace: failed to reach {fed_ws.instance_id}: {exc}") + return False + + +# --------------------------------------------------------------------------- +# Payload builders +# --------------------------------------------------------------------------- + + +def build_message_payload( + *, + msg_type: str = "message", + sync_id: int, + post_id: str, + channel_id: str, + user_name: str, + user_avatar_url: str | None, + workspace_name: str, + text: str, + thread_post_id: str | None = None, + images: list[dict] | None = None, + timestamp: str | None = None, +) -> dict: + """Build a standardised federation message payload.""" + return { + "type": msg_type, + "sync_id": sync_id, + "post_id": post_id, + "channel_id": channel_id, + "user": { + "display_name": user_name, + "avatar_url": user_avatar_url, + 
"workspace_name": workspace_name, + }, + "text": text, + "thread_post_id": thread_post_id, + "images": images or [], + "timestamp": timestamp, + } + + +def build_edit_payload( + *, + post_id: str, + channel_id: str, + text: str, + timestamp: str, +) -> dict: + """Build a federation edit payload.""" + return { + "type": "edit", + "post_id": post_id, + "channel_id": channel_id, + "text": text, + "timestamp": timestamp, + } + + +def build_delete_payload( + *, + post_id: str, + channel_id: str, + timestamp: str, +) -> dict: + """Build a federation delete payload.""" + return { + "type": "delete", + "post_id": post_id, + "channel_id": channel_id, + "timestamp": timestamp, + } + + +def build_reaction_payload( + *, + post_id: str, + channel_id: str, + reaction: str, + action: str, + user_name: str, + user_avatar_url: str | None = None, + workspace_name: str | None = None, + timestamp: str, +) -> dict: + """Build a federation reaction payload.""" + return { + "type": "react", + "post_id": post_id, + "channel_id": channel_id, + "reaction": reaction, + "action": action, + "user_name": user_name, + "user_avatar_url": user_avatar_url, + "workspace_name": workspace_name, + "timestamp": timestamp, + } diff --git a/syncbot/handlers/__init__.py b/syncbot/handlers/__init__.py new file mode 100644 index 0000000..0bffebd --- /dev/null +++ b/syncbot/handlers/__init__.py @@ -0,0 +1,145 @@ +"""Handlers package – Slack event, action, and view-submission handlers. + +Re-exports every public symbol so that ``import handlers`` / +``from handlers import X`` continues to work after the split. 
"""Handlers package – Slack event, action, and view-submission handlers.

Re-exports every public symbol so that ``import handlers`` /
``from handlers import X`` continues to work after the split.
"""

from handlers._common import (
    EventContext,
    _get_authorized_workspace,
    _parse_private_metadata,
    _sanitize_text,
)
from handlers.channel_sync import (
    handle_pause_sync,
    handle_publish_channel,
    handle_publish_channel_submit_ack,
    handle_publish_channel_submit_work,
    handle_publish_mode_submit_ack,
    handle_resume_sync,
    handle_stop_sync,
    handle_stop_sync_confirm,
    handle_subscribe_channel,
    handle_subscribe_channel_submit,
    handle_unpublish_channel,
)
from handlers.export_import import (
    handle_backup_download,
    handle_backup_restore,
    handle_backup_restore_proceed,
    handle_backup_restore_submit_ack,
    handle_backup_restore_submit_work,
    handle_data_migration,
    handle_data_migration_export,
    handle_data_migration_proceed,
    handle_data_migration_submit_ack,
    handle_data_migration_submit_work,
)
from handlers.federation_cmds import (
    handle_enter_federation_code,
    handle_federation_code_submit,
    handle_federation_label_submit,
    handle_generate_federation_code,
    handle_remove_federation_connection,
)
from handlers.group_manage import (
    handle_leave_group,
    handle_leave_group_confirm,
)
from handlers.groups import (
    handle_accept_group_invite,
    handle_create_group,
    handle_create_group_submit,
    handle_decline_group_invite,
    handle_invite_workspace,
    handle_invite_workspace_submit,
    handle_join_group,
    handle_join_group_submit,
)
from handlers.messages import (
    _handle_reaction,
    _is_own_bot_message,
    _parse_event_fields,
    respond_to_message_event,
)
from handlers.sync import (
    check_join_sync_channel,
    handle_app_home_opened,
    handle_db_reset,
    handle_db_reset_proceed,
    handle_join_sync_submission,
    handle_member_joined_channel,
    handle_new_sync_submission,
    handle_refresh_home,
    handle_remove_sync,
)
from handlers.tokens import handle_tokens_revoked
from handlers.users import (
    handle_team_join,
    handle_user_mapping_back,
    handle_user_mapping_edit_submit,
    handle_user_mapping_refresh,
    handle_user_profile_changed,
)

# Kept strictly alphabetical so additions are easy to diff-review.
__all__ = [
    "EventContext",
    "_get_authorized_workspace",
    "_handle_reaction",
    "_is_own_bot_message",
    "_parse_event_fields",
    "_parse_private_metadata",
    "_sanitize_text",
    "check_join_sync_channel",
    "handle_accept_group_invite",
    "handle_app_home_opened",
    "handle_backup_download",
    "handle_backup_restore",
    "handle_backup_restore_proceed",
    "handle_backup_restore_submit_ack",
    "handle_backup_restore_submit_work",
    "handle_create_group",
    "handle_create_group_submit",
    "handle_data_migration",
    "handle_data_migration_export",
    "handle_data_migration_proceed",
    "handle_data_migration_submit_ack",
    "handle_data_migration_submit_work",
    "handle_db_reset",
    "handle_db_reset_proceed",
    "handle_decline_group_invite",
    "handle_enter_federation_code",
    "handle_federation_code_submit",
    "handle_federation_label_submit",
    "handle_generate_federation_code",
    "handle_invite_workspace",
    "handle_invite_workspace_submit",
    "handle_join_group",
    "handle_join_group_submit",
    "handle_join_sync_submission",
    "handle_leave_group",
    "handle_leave_group_confirm",
    "handle_member_joined_channel",
    "handle_new_sync_submission",
    "handle_pause_sync",
    "handle_publish_channel",
    "handle_publish_channel_submit_ack",
    "handle_publish_channel_submit_work",
    "handle_publish_mode_submit_ack",
    "handle_refresh_home",
    "handle_remove_federation_connection",
    "handle_remove_sync",
    "handle_resume_sync",
    "handle_stop_sync",
    "handle_stop_sync_confirm",
    "handle_subscribe_channel",
    "handle_subscribe_channel_submit",
    "handle_team_join",
    "handle_tokens_revoked",
    "handle_unpublish_channel",
    "handle_user_mapping_back",
    "handle_user_mapping_edit_submit",
    "handle_user_mapping_refresh",
    "handle_user_profile_changed",
    "respond_to_message_event",
]
"""Shared handler utilities and types."""

import json
import logging
from typing import Any, TypedDict

import helpers
from db import schemas

_logger = logging.getLogger(__name__)

# NOTE: the previous try/except fallback to ``typing_extensions.TypedDict``
# was dead code — this module already uses ``str | None`` annotations, which
# require Python >= 3.10, where ``typing.TypedDict`` is always available.


class EventContext(TypedDict):
    """Strongly-typed dict returned by ``_parse_event_fields``."""

    team_id: str | None
    channel_id: str | None
    user_id: str | None
    msg_text: str
    mentioned_users: list[dict[str, Any]]
    thread_ts: str | None
    ts: str | None
    event_subtype: str | None


def _parse_private_metadata(body: dict) -> dict:
    """Extract and parse JSON ``private_metadata`` from a view submission.

    Returns ``{}`` on any malformed payload: invalid JSON, a non-string
    value, or JSON whose top level is not an object (callers immediately
    call ``.get`` on the result, so a list/str payload must not leak out).
    """
    raw = helpers.safe_get(body, "view", "private_metadata") or "{}"
    try:
        parsed = json.loads(raw)
    except (TypeError, ValueError) as exc:  # ValueError covers JSONDecodeError
        _logger.debug(f"_parse_private_metadata: bad JSON: {exc}")
        return {}
    if not isinstance(parsed, dict):
        _logger.debug("_parse_private_metadata: top-level JSON is not an object")
        return {}
    return parsed


def _extract_team_id(body: dict) -> str | None:
    """Return a workspace/team ID from common Slack payload locations.

    Checks, in order: view submissions, action payloads, event payloads,
    and the acting user's team.
    """
    return (
        helpers.safe_get(body, "view", "team_id")
        or helpers.safe_get(body, "team", "id")
        or helpers.safe_get(body, "team_id")
        or helpers.safe_get(body, "user", "team_id")
    )


def _get_authorized_workspace(
    body: dict, client, context: dict, action_name: str
) -> tuple[str, schemas.Workspace] | None:
    """Validate authorization and return ``(user_id, workspace_record)``.

    Returns *None* and logs a warning if the user is not authorized or
    the workspace cannot be resolved.
    """
    user_id = helpers.get_user_id_from_body(body)
    if not user_id or not helpers.is_user_authorized(client, user_id):
        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": action_name})
        return None

    team_id = _extract_team_id(body)
    if not team_id:
        _logger.warning("workspace_resolution_failed", extra={"user_id": user_id, "action": action_name})
        return None
    workspace_record = helpers.get_workspace_record(team_id, body, context, client)
    if not workspace_record:
        return None

    return user_id, workspace_record


def _iter_view_state_actions(body: dict):
    """Yield ``(action_id, action_data)`` pairs from ``view.state.values``."""
    state_values = helpers.safe_get(body, "view", "state", "values") or {}
    for block_data in state_values.values():
        yield from block_data.items()


def _get_selected_option_value(body: dict, action_id: str) -> str | None:
    """Return ``selected_option.value`` for a view state action."""
    for aid, action_data in _iter_view_state_actions(body):
        if aid == action_id:
            return helpers.safe_get(action_data, "selected_option", "value")
    return None


def _get_text_input_value(body: dict, action_id: str) -> str | None:
    """Return plain-text ``value`` for a view state action."""
    for aid, action_data in _iter_view_state_actions(body):
        if aid == action_id:
            return action_data.get("value")
    return None


def _get_selected_conversation_or_option(body: dict, action_id: str) -> str | None:
    """Return selected conversation ID, falling back to selected option value."""
    for aid, action_data in _iter_view_state_actions(body):
        if aid == action_id:
            return action_data.get("selected_conversation") or helpers.safe_get(
                action_data, "selected_option", "value"
            )
    return None


def _sanitize_text(value: str, max_length: int = 100) -> str:
    """Strip and truncate user-supplied text to prevent oversized DB writes.

    Falsy values (``""`` / ``None``) are returned unchanged. Note the strip
    happens before truncation, so the cut may still leave interior/trailing
    whitespace at the boundary.
    """
    if not value:
        return value
    return value.strip()[:max_length]
"""Channel sync handlers — publish, unpublish, subscribe, pause, resume, stop."""

import contextlib
import logging
from datetime import UTC, datetime
from logging import Logger

from slack_sdk.web import WebClient

import builders
import helpers
from builders._common import _format_channel_ref, _get_group_members
from db import DbManager, schemas
from handlers._common import (
    _extract_team_id,
    _get_authorized_workspace,
    _get_selected_conversation_or_option,
    _get_selected_option_value,
    _parse_private_metadata,
    _sanitize_text,
)
from slack import actions, orm
from slack.blocks import context as block_context
from slack.blocks import section

_logger = logging.getLogger(__name__)

_MAX_PUBLISH_CHANNEL_OPTIONS = 100


def _get_publishable_channel_options(client: WebClient, workspace_id: int) -> list[orm.SelectorOption]:
    """Return selector options for channels that are not already published/synced in this workspace."""
    # Channels that already have a live (non-deleted) SyncChannel row are excluded.
    already_synced = {
        record.channel_id
        for record in DbManager.find_records(
            schemas.SyncChannel,
            [
                schemas.SyncChannel.workspace_id == workspace_id,
                schemas.SyncChannel.deleted_at.is_(None),
            ],
        )
    }

    collected: list[orm.SelectorOption] = []
    next_cursor = ""
    try:
        # Paginate conversations.list until we fill the option cap or run out of pages.
        while True:
            page = client.conversations_list(
                types="public_channel,private_channel",
                exclude_archived=True,
                limit=200,
                cursor=next_cursor or None,
            )
            for slack_channel in helpers.safe_get(page, "channels") or []:
                channel_id = slack_channel.get("id")
                if not channel_id or channel_id in already_synced:
                    continue
                label = f"#{slack_channel.get('name') or channel_id}"
                # Slack option labels cap at 75 chars; keep room for the ellipsis.
                if len(label) > 75:
                    label = label[:72] + "..."
                collected.append(orm.SelectorOption(name=label, value=channel_id))
                if len(collected) >= _MAX_PUBLISH_CHANNEL_OPTIONS:
                    return collected
            next_cursor = helpers.safe_get(page, "response_metadata", "next_cursor") or ""
            if not next_cursor:
                break
    except Exception as e:
        # Best-effort: return whatever was collected before the API error.
        _logger.warning(f"_get_publishable_channel_options: {e}")

    return collected


def _build_publish_step2(
    client: WebClient,
    group_id: int,
    sync_mode: str,
    other_members: list,
    workspace_id: int,
) -> orm.BlockView:
    """Build the step-2 modal blocks: channel picker (only unpublished channels) + optional target workspace."""
    blocks: list[orm.BaseBlock] = []

    picker_options = _get_publishable_channel_options(client, workspace_id)
    if not picker_options:
        # Placeholder entry — the submit handlers reject the "__none__" sentinel.
        picker_options = [
            orm.SelectorOption(
                name="— No Channels available (all are already published or synced) —", value="__none__"
            ),
        ]
    blocks.append(
        orm.InputBlock(
            label="Channel to Publish",
            action=actions.CONFIG_PUBLISH_CHANNEL_SELECT,
            element=orm.StaticSelectElement(
                placeholder="Select a Channel to publish",
                options=picker_options,
            ),
            optional=False,
        )
    )
    blocks.append(block_context("Select a Channel from your Workspace to make available for Syncing."))

    # Direct mode adds a second picker for the one group member allowed to sync.
    if sync_mode == "direct" and other_members:
        target_options: list[orm.SelectorOption] = []
        for member in other_members:
            member_ws = helpers.get_workspace_by_id(member.workspace_id)
            if member_ws:
                display = helpers.resolve_workspace_name(member_ws)
            else:
                display = f"Workspace {member.workspace_id}"
            target_options.append(orm.SelectorOption(name=display, value=str(member.workspace_id)))

        if target_options:
            blocks.append(
                orm.InputBlock(
                    label="Target Workspace",
                    action=actions.CONFIG_PUBLISH_DIRECT_TARGET,
                    element=orm.StaticSelectElement(
                        placeholder="Select target Workspace",
                        options=target_options,
                    ),
                    optional=False,
                )
            )

    return orm.BlockView(blocks=blocks)
def handle_publish_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Open the publish-channel flow — always starts with step 1 (sync mode selection)."""
    auth_result = _get_authorized_workspace(body, client, context, "publish_channel")
    if not auth_result:
        return
    _, workspace_record = auth_result

    trigger_id = helpers.safe_get(body, "trigger_id")
    # The Home-tab button carries the group ID in the action value.
    raw_group_id = helpers.safe_get(body, "actions", 0, "value")
    try:
        group_id = int(raw_group_id)
    except (TypeError, ValueError):
        _logger.warning(f"publish_channel: invalid group_id: {raw_group_id!r}")
        return

    mode_options = [
        orm.SelectorOption(
            name="Available to All Workspaces\nAny current or future Workspace Group Member can Sync.",
            value="group",
        ),
        orm.SelectorOption(
            name="Only with Specific Workspace\nChoose a specific Workspace Group Member to allow Syncing.",
            value="direct",
        ),
    ]
    step1_blocks: list[orm.BaseBlock] = [
        orm.InputBlock(
            label="Channel Sync Mode",
            action=actions.CONFIG_PUBLISH_SYNC_MODE,
            element=orm.RadioButtonsElement(
                initial_value="group",
                options=orm.as_selector_options(
                    [o.name for o in mode_options],
                    [o.value for o in mode_options],
                ),
            ),
            optional=False,
        ),
    ]
    # group_id / workspace_id ride along in private_metadata for step 2.
    orm.BlockView(blocks=step1_blocks).post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_PUBLISH_MODE_SUBMIT,
        title_text="Sync Channel",
        submit_button_text="Next",
        parent_metadata={"group_id": group_id, "workspace_id": workspace_record.id},
        new_or_add="new",
    )


def handle_publish_mode_submit_ack(
    body: dict,
    client: WebClient,
    context: dict,
) -> dict | None:
    """Ack phase for step 1: read sync mode and return ``response_action=update`` for step 2."""
    auth_result = _get_authorized_workspace(body, client, context, "publish_mode_submit")
    if not auth_result:
        return None
    _, workspace_record = auth_result

    metadata = _parse_private_metadata(body)
    group_id = metadata.get("group_id")
    if not group_id:
        # Log enough to diagnose truncated/corrupted private_metadata payloads.
        raw_pm = helpers.safe_get(body, "view", "private_metadata") or ""
        _logger.warning(
            "publish_mode_submit: missing group_id in metadata",
            extra={
                "team_id": _extract_team_id(body),
                "workspace_id": metadata.get("workspace_id"),
                "private_metadata_len": len(raw_pm) if isinstance(raw_pm, str) else None,
            },
        )
        return None

    sync_mode = _get_selected_option_value(body, actions.CONFIG_PUBLISH_SYNC_MODE) or "group"

    # "direct" mode needs the other group members for the target-workspace picker.
    group_members = _get_group_members(group_id)
    other_members = [
        member for member in group_members if member.workspace_id != workspace_record.id and member.workspace_id
    ]
    step2 = _build_publish_step2(client, group_id, sync_mode, other_members, workspace_record.id)
    updated_view = step2.as_ack_update(
        callback_id=actions.CONFIG_PUBLISH_CHANNEL_SUBMIT,
        title_text="Sync Channel",
        submit_button_text="Publish",
        parent_metadata={"group_id": group_id, "sync_mode": sync_mode},
    )
    return {"response_action": "update", "view": updated_view}


def handle_publish_channel_submit_ack(
    body: dict,
    client: WebClient,
    context: dict,
) -> dict | None:
    """Ack phase for publish: validate and close modal (errors) or empty ack (success).

    Returns a Slack ``response_action=errors`` dict to keep the modal open
    with inline errors, or ``None`` to accept (closing the modal).
    """
    auth_result = _get_authorized_workspace(body, client, context, "publish_channel_submit")
    if not auth_result:
        return None
    _, workspace_record = auth_result

    metadata = _parse_private_metadata(body)
    group_id = metadata.get("group_id")

    if not group_id:
        _logger.warning("publish_channel_submit: missing group_id in metadata")
        return None

    sync_mode = metadata.get("sync_mode", "group")
    target_workspace_id = None
    selected_target = _get_selected_option_value(body, actions.CONFIG_PUBLISH_DIRECT_TARGET)
    if selected_target:
        with contextlib.suppress(TypeError, ValueError):
            target_workspace_id = int(selected_target)

    # Direct mode without a resolvable target degrades to group mode.
    if sync_mode == "direct" and not target_workspace_id:
        sync_mode = "group"

    channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_PUBLISH_CHANNEL_SELECT)

    # "__none__" is the placeholder option shown when no channels are publishable.
    if not channel_id or channel_id == "__none__":
        return {
            "response_action": "errors",
            "errors": {actions.CONFIG_PUBLISH_CHANNEL_SELECT: "Select a Channel to publish."},
        }

    existing = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.channel_id == channel_id,
            schemas.SyncChannel.workspace_id == workspace_record.id,
            schemas.SyncChannel.deleted_at.is_(None),
        ],
    )
    if existing:
        return {
            "response_action": "errors",
            "errors": {actions.CONFIG_PUBLISH_CHANNEL_SELECT: "This Channel is already being synced."},
        }

    return None


def handle_publish_channel_submit_work(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Lazy work phase: create Sync + SyncChannel after modal closed."""
    auth_result = _get_authorized_workspace(body, client, context, "publish_channel_submit")
    if not auth_result:
        return
    _, workspace_record = auth_result

    metadata = _parse_private_metadata(body)
    group_id = metadata.get("group_id")

    if not group_id:
        return

    # Re-derive everything the ack phase validated — the lazy listener gets a
    # fresh payload and the DB may have changed between ack and work.
    sync_mode = metadata.get("sync_mode", "group")
    target_workspace_id = None
    selected_target = _get_selected_option_value(body, actions.CONFIG_PUBLISH_DIRECT_TARGET)
    if selected_target:
        with contextlib.suppress(TypeError, ValueError):
            target_workspace_id = int(selected_target)

    if sync_mode == "direct" and not target_workspace_id:
        sync_mode = "group"

    channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_PUBLISH_CHANNEL_SELECT)

    if not channel_id or channel_id == "__none__":
        return

    existing = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.channel_id == channel_id,
            schemas.SyncChannel.workspace_id == workspace_record.id,
            schemas.SyncChannel.deleted_at.is_(None),
        ],
    )
    if existing:
        return

    # Best-effort channel name lookup; falls back to the raw ID as the title.
    try:
        conv_info = client.conversations_info(channel=channel_id)
        channel_name = helpers.safe_get(conv_info, "channel", "name") or channel_id
    except Exception as exc:
        _logger.debug(f"handle_publish_channel_submit_work: conversations_info failed for {channel_id}: {exc}")
        channel_name = channel_id

    try:
        # Join first so the bot can post into the channel once synced.
        client.conversations_join(channel=channel_id)

        sync_record = schemas.Sync(
            title=_sanitize_text(channel_name),
            description=None,
            group_id=group_id,
            sync_mode=sync_mode,
            target_workspace_id=target_workspace_id if sync_mode == "direct" else None,
            publisher_workspace_id=workspace_record.id,
        )
        DbManager.create_record(sync_record)

        sync_channel_record = schemas.SyncChannel(
            sync_id=sync_record.id,
            channel_id=channel_id,
            workspace_id=workspace_record.id,
            created_at=datetime.now(UTC),
        )
        DbManager.create_record(sync_channel_record)

        _logger.info(
            "channel_published",
            extra={
                "workspace_id": workspace_record.id,
                "channel_id": channel_id,
                "group_id": group_id,
                "sync_id": sync_record.id,
                "sync_mode": sync_mode,
            },
        )
    except Exception as e:
        _logger.error(f"Failed to publish channel {channel_id}: {e}")

    # Home tabs refresh regardless of success so stale state isn't shown.
    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context)
def handle_unpublish_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Unpublish a channel: hard-delete the Sync record.

    DB cascades remove all ``SyncChannel`` and ``PostMeta`` rows.
    Only the original publisher can unpublish.
    """
    auth_result = _get_authorized_workspace(body, client, context, "unpublish_channel")
    if not auth_result:
        return
    user_id, workspace_record = auth_result

    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    raw_value = helpers.safe_get(body, "actions", 0, "value")
    try:
        sync_id = int(raw_value)
    except (TypeError, ValueError):
        _logger.warning(f"Invalid sync_id for unpublish: {raw_value!r}")
        return

    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if not sync_record:
        return

    # Authorization: only the publisher workspace may tear down the Sync.
    if workspace_record and sync_record.publisher_workspace_id != workspace_record.id:
        _logger.warning("unpublish_denied: not the publisher")
        return

    group_id = sync_record.group_id

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    # Notify + leave every participating channel BEFORE deleting the Sync,
    # while the channel rows (and each workspace's bot token) are still known.
    for sync_channel in all_channels:
        try:
            member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id)
            if member_ws and member_ws.bot_token:
                # Local channel sees the admin's name; remote ones see the label.
                name = (
                    admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label
                )
                member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token))
                helpers.notify_synced_channels(
                    member_client,
                    [sync_channel.channel_id],
                    f":octagonal_sign: *{name}* unpublished this Channel. Syncing is no longer available.",
                )
                member_client.conversations_leave(channel=sync_channel.channel_id)
        except Exception as e:
            # Best-effort: a single unreachable workspace must not block teardown.
            _logger.warning(f"Failed to notify/leave channel {sync_channel.channel_id}: {e}")

    # Hard delete; SyncChannel/PostMeta rows go via DB cascade.
    DbManager.delete_records(schemas.Sync, [schemas.Sync.id == sync_id])

    _logger.info(
        "channel_unpublished",
        extra={"sync_id": sync_id, "group_id": group_id},
    )

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    if group_id:
        _refresh_group_member_homes(group_id, workspace_record.id if workspace_record else 0, logger, context=context)


def _toggle_sync_status(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
    *,
    action_prefix: str,
    target_status: str,
    emoji: str,
    verb: str,
    log_event: str,
) -> None:
    """Shared logic for pausing or resuming a channel sync. Only the current workspace's channel is toggled.

    ``action_prefix``/``target_status``/``emoji``/``verb``/``log_event``
    parameterize the pause vs. resume variants (see the two thin wrappers).
    """
    # The sync ID is encoded as a suffix of the button's action_id.
    action_id = helpers.safe_get(body, "actions", 0, "action_id") or ""
    sync_id_str = action_id.replace(action_prefix + "_", "")

    try:
        sync_id = int(sync_id_str)
    except (TypeError, ValueError):
        _logger.warning(f"{log_event}_invalid_id", extra={"action_id": action_id})
        return

    auth_result = _get_authorized_workspace(body, client, context, log_event)
    if not auth_result:
        return
    user_id, workspace_record = auth_result
    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )
    my_sync_channel = next(
        (c for c in all_channels if c.workspace_id == workspace_record.id),
        None,
    )
    if not my_sync_channel:
        _logger.warning(
            f"{log_event}_no_channel_for_workspace", extra={"sync_id": sync_id, "workspace_id": workspace_record.id}
        )
        return

    DbManager.update_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.id == my_sync_channel.id],
        {schemas.SyncChannel.status: target_status},
    )
    # Invalidate the per-channel sync-list cache so routing sees the new status.
    helpers._cache_delete(f"sync_list:{my_sync_channel.channel_id}")

    ws_cache: dict[int, schemas.Workspace | None] = {}
    # Single-element loop keeps the shape symmetric with the other handlers
    # that notify several channels; only this workspace's channel is touched.
    for sync_channel in [my_sync_channel]:
        try:
            channel_ws = ws_cache.get(sync_channel.workspace_id) or helpers.get_workspace_by_id(
                sync_channel.workspace_id
            )
            ws_cache[sync_channel.workspace_id] = channel_ws
            if channel_ws and channel_ws.bot_token:
                ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token))
                if target_status == "active":
                    # Resuming: rejoin in case the bot left while paused; failure is non-fatal.
                    with contextlib.suppress(Exception):
                        ws_client.conversations_join(channel=sync_channel.channel_id)
                name = (
                    admin_name if workspace_record and sync_channel.workspace_id == workspace_record.id else admin_label
                )
                other_channels = [c for c in all_channels if c.workspace_id != sync_channel.workspace_id]
                if other_channels:
                    other_ws = ws_cache.get(other_channels[0].workspace_id) or helpers.get_workspace_by_id(
                        other_channels[0].workspace_id
                    )
                    ws_cache[other_channels[0].workspace_id] = other_ws
                    channel_ref = helpers.resolve_channel_name(other_channels[0].channel_id, other_ws)
                    msg = f":{emoji}: *{name}* {verb} syncing with *{channel_ref}*."
                else:
                    msg = f":{emoji}: *{name}* {verb} channel syncing."
                helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg)
        except Exception as e:
            _logger.warning(f"Failed to notify channel {sync_channel.channel_id} about {verb}: {e}")

    _logger.info(log_event, extra={"sync_id": sync_id, "sync_channel_id": my_sync_channel.id})

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if sync_record and sync_record.group_id:
        _refresh_group_member_homes(
            sync_record.group_id, workspace_record.id if workspace_record else 0, logger, context=context
        )
def handle_pause_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None:
    """Pause an active channel sync."""
    _toggle_sync_status(
        body,
        client,
        logger,
        context,
        action_prefix=actions.CONFIG_PAUSE_SYNC,
        target_status="paused",
        emoji="double_vertical_bar",
        verb="paused",
        log_event="sync_paused",
    )


def handle_resume_sync(body: dict, client: WebClient, logger: Logger, context: dict) -> None:
    """Resume a paused channel sync."""
    _toggle_sync_status(
        body,
        client,
        logger,
        context,
        action_prefix=actions.CONFIG_RESUME_SYNC,
        target_status="active",
        emoji="arrow_forward",
        verb="resumed",
        log_event="sync_resumed",
    )


def handle_stop_sync(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Show a confirmation modal before stopping a channel sync."""
    # The sync ID is encoded as a suffix of the button's action_id.
    action_id = helpers.safe_get(body, "actions", 0, "action_id") or ""
    sync_id_str = action_id.replace(actions.CONFIG_STOP_SYNC + "_", "")

    try:
        sync_id = int(sync_id_str)
    except (TypeError, ValueError):
        _logger.warning("stop_sync_invalid_id", extra={"action_id": action_id})
        return

    trigger_id = helpers.safe_get(body, "trigger_id")
    if not trigger_id:
        return

    confirm_form = orm.BlockView(
        blocks=[
            section(
                ":warning: *Are you sure you want to stop syncing this Channel?*\n\n"
                "This will:\n"
                "\u2022 Remove your Workspace's Sync history for this Channel\n"
                "\u2022 Remove this Channel from the active Sync\n"
                "\u2022 Other Workspaces in the Sync will continue uninterrupted\n\n"
                "_No messages will be deleted from any Channel — only SyncBot's tracking history for your Workspace is removed._"
            ),
        ]
    )

    # sync_id travels via private_metadata to handle_stop_sync_confirm.
    confirm_form.post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_STOP_SYNC_CONFIRM,
        title_text="Stop Syncing",
        submit_button_text="Stop Syncing",
        close_button_text="Cancel",
        parent_metadata={"sync_id": sync_id},
    )


def handle_stop_sync_confirm(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Execute channel sync stop after confirmation.

    Removes only this workspace's ``SyncChannel`` and its ``PostMeta``.
    Other workspaces' data and the Sync record remain intact.
    """
    auth_result = _get_authorized_workspace(body, client, context, "stop_sync_confirm")
    if not auth_result:
        return
    user_id, workspace_record = auth_result

    meta = _parse_private_metadata(body)
    sync_id = meta.get("sync_id")
    if not sync_id:
        _logger.warning("stop_sync_confirm: missing sync_id in metadata")
        return

    admin_name, admin_label = helpers.format_admin_label(client, user_id, workspace_record)

    all_channels = DbManager.find_records(
        schemas.SyncChannel,
        [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
    )

    my_channel = next((c for c in all_channels if c.workspace_id == workspace_record.id), None)
    other_channels = [c for c in all_channels if c.workspace_id != workspace_record.id]

    # Notify every participating channel BEFORE deleting anything, while the
    # rows (and therefore channel IDs / bot tokens) are still available.
    for sync_channel in all_channels:
        try:
            channel_ws = helpers.get_workspace_by_id(sync_channel.workspace_id)
            if channel_ws and channel_ws.bot_token:
                # Message wording depends on which side of the sync is being told.
                if sync_channel.workspace_id == workspace_record.id and other_channels:
                    other_ws = helpers.get_workspace_by_id(other_channels[0].workspace_id)
                    channel_ref = helpers.resolve_channel_name(other_channels[0].channel_id, other_ws)
                    msg = f":octagonal_sign: *{admin_name}* stopped syncing with *{channel_ref}*."
                elif sync_channel.workspace_id != workspace_record.id:
                    my_ref = (
                        helpers.resolve_channel_name(my_channel.channel_id, workspace_record)
                        if my_channel
                        else "the other Workspace"
                    )
                    msg = f":octagonal_sign: *{admin_label}* stopped syncing with *{my_ref}*."
                else:
                    msg = f":octagonal_sign: *{admin_name}* stopped Channel Syncing."
                ws_client = WebClient(token=helpers.decrypt_bot_token(channel_ws.bot_token))
                helpers.notify_synced_channels(ws_client, [sync_channel.channel_id], msg)
        except Exception as e:
            _logger.warning(f"Failed to notify channel {sync_channel.channel_id}: {e}")

    if my_channel:
        # Delete PostMeta first (it references the SyncChannel row), then the row itself.
        DbManager.delete_records(schemas.PostMeta, [schemas.PostMeta.sync_channel_id == my_channel.id])
        DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == my_channel.id])
        try:
            client.conversations_leave(channel=my_channel.channel_id)
        except Exception as e:
            _logger.warning(f"Failed to leave channel {my_channel.channel_id}: {e}")

    _logger.info(
        "sync_stopped",
        extra={
            "sync_id": sync_id,
            "workspace_id": workspace_record.id,
            "channel_id": my_channel.channel_id if my_channel else None,
        },
    )

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if sync_record and sync_record.group_id:
        _refresh_group_member_homes(sync_record.group_id, workspace_record.id, logger, context=context)
def handle_subscribe_channel(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Push the channel picker modal for subscribing to an available channel.

    The channel list only shows channels that are not already in any sync
    (excluding already-synced and published-but-unsubscribed channels).
    """
    auth_result = _get_authorized_workspace(body, client, context, "subscribe_channel")
    if not auth_result:
        return
    _, workspace_record = auth_result

    trigger_id = helpers.safe_get(body, "trigger_id")
    raw_sync_id = helpers.safe_get(body, "actions", 0, "value")
    # Fix: the action value is user-controlled; parse it once up front instead
    # of calling int() unguarded below (which raised ValueError on malformed
    # payloads). Mirrors the guard used by handle_publish_channel.
    sync_id: int | None = None
    if raw_sync_id:
        try:
            sync_id = int(raw_sync_id)
        except (TypeError, ValueError):
            _logger.warning(f"subscribe_channel: invalid sync_id: {raw_sync_id!r}")
            return

    blocks: list[orm.BaseBlock] = []

    if sync_id:
        # Show which remote channel this subscription would sync with.
        publisher_channels = DbManager.find_records(
            schemas.SyncChannel,
            [schemas.SyncChannel.sync_id == sync_id, schemas.SyncChannel.deleted_at.is_(None)],
        )
        if publisher_channels:
            pub_ch = publisher_channels[0]
            pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
            ch_ref = _format_channel_ref(pub_ch.channel_id, pub_ws, is_local=False)
            blocks.append(section(f"Syncing with: {ch_ref}"))

    channel_options = _get_publishable_channel_options(client, workspace_record.id)
    if not channel_options:
        # Placeholder entry — the submit handler rejects the "__none__" sentinel.
        channel_options = [
            orm.SelectorOption(
                name="— No Channels available to Sync in this Workspace. —", value="__none__"
            ),
        ]
    blocks.append(
        orm.InputBlock(
            label="Channel for Sync",
            action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT,
            element=orm.StaticSelectElement(
                placeholder="Select a Channel to Sync with.",
                options=channel_options,
            ),
            optional=False,
        )
    )
    blocks.append(block_context("Choose a Channel in your Workspace to start Syncing."))

    orm.BlockView(blocks=blocks).post_modal(
        client=client,
        trigger_id=trigger_id,
        callback_id=actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT,
        title_text="Sync Channel",
        submit_button_text="Sync Channel",
        parent_metadata={"sync_id": sync_id} if sync_id else None,
        new_or_add="new",
    )


def handle_subscribe_channel_submit(
    body: dict,
    client: WebClient,
    logger: Logger,
    context: dict,
) -> None:
    """Subscribe to an available channel sync: create SyncChannel for subscriber.

    Side effects on success: the bot joins the chosen local channel, a
    ``SyncChannel`` row is created, and both sides of the sync are notified.
    Home tabs are refreshed regardless of outcome.
    """
    auth_result = _get_authorized_workspace(body, client, context, "subscribe_channel_submit")
    if not auth_result:
        return
    user_id, workspace_record = auth_result

    metadata = _parse_private_metadata(body)
    sync_id = metadata.get("sync_id")

    if not sync_id:
        _logger.warning("subscribe_channel_submit: missing sync_id")
        return

    channel_id = _get_selected_conversation_or_option(body, actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT)

    if not channel_id or channel_id == "__none__":
        _logger.warning("subscribe_channel_submit: no channel selected")
        return

    sync_record = DbManager.get_record(schemas.Sync, id=sync_id)
    if not sync_record:
        return

    group_id = sync_record.group_id

    # Idempotency: a repeat submission for an already-active subscription is a no-op.
    existing_sub = DbManager.find_records(
        schemas.SyncChannel,
        [
            schemas.SyncChannel.sync_id == sync_id,
            schemas.SyncChannel.workspace_id == workspace_record.id,
            schemas.SyncChannel.channel_id == channel_id,
            schemas.SyncChannel.deleted_at.is_(None),
            schemas.SyncChannel.status == "active",
        ],
    )
    if existing_sub:
        _logger.info(
            "subscribe_channel_duplicate_skip",
            extra={
                "sync_id": sync_id,
                "channel_id": channel_id,
                "workspace_id": workspace_record.id,
            },
        )
        builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
        if group_id:
            _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context)
        return

    acting_user_id = helpers.safe_get(body, "user", "id") or user_id
    admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record)

    publisher_channels: list = []
    try:
        # Join first so the bot can post the announcement below.
        client.conversations_join(channel=channel_id)

        sync_channel_record = schemas.SyncChannel(
            sync_id=sync_id,
            channel_id=channel_id,
            workspace_id=workspace_record.id,
            created_at=datetime.now(UTC),
        )
        DbManager.create_record(sync_channel_record)

        publisher_channels = DbManager.find_records(
            schemas.SyncChannel,
            [
                schemas.SyncChannel.sync_id == sync_id,
                schemas.SyncChannel.deleted_at.is_(None),
                schemas.SyncChannel.workspace_id != workspace_record.id,
            ],
        )

        # Announce in the subscriber's channel (best-effort).
        try:
            if publisher_channels:
                pub_ch = publisher_channels[0]
                pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
                channel_ref = helpers.resolve_channel_name(pub_ch.channel_id, pub_ws)
            else:
                channel_ref = sync_record.title or "the other Channel"
            client.chat_postMessage(
                channel=channel_id,
                text=f":arrows_counterclockwise: *{admin_name}* started syncing this Channel with *{channel_ref}*. Messages will be shared automatically.",
            )
        except Exception as exc:
            _logger.debug(f"subscribe_channel: failed to notify subscriber channel {channel_id}: {exc}")

        # Announce in each publisher-side channel (best-effort, per channel).
        local_ref = helpers.resolve_channel_name(channel_id, workspace_record)
        for pub_ch in publisher_channels:
            try:
                pub_ws = helpers.get_workspace_by_id(pub_ch.workspace_id)
                if pub_ws:
                    pub_client = WebClient(token=helpers.decrypt_bot_token(pub_ws.bot_token))
                    pub_client.chat_postMessage(
                        channel=pub_ch.channel_id,
                        text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this Channel. Messages will be shared automatically.",
                    )
            except Exception as exc:
                _logger.debug(f"subscribe_channel: failed to notify publisher channel {pub_ch.channel_id}: {exc}")

        _logger.info(
            "channel_subscribed",
            extra={
                "workspace_id": workspace_record.id,
                "channel_id": channel_id,
                "sync_id": sync_id,
                "group_id": group_id,
            },
        )
    except Exception as e:
        _logger.error(f"Failed to subscribe to channel sync {sync_id}: {e}")

    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
    if group_id:
        _refresh_group_member_homes(group_id, workspace_record.id, logger, context=context)


def _refresh_group_member_homes(
    group_id: int,
    exclude_workspace_id: int,
    logger: Logger,
    context: dict | None = None,
) -> None:
    """Refresh the Home tab for all group members except the acting workspace.

    Uses context=None when refreshing other members so admin lookups are always
    fresh for each workspace (avoids request-scoped cache from the acting ws).
    """
    members = _get_group_members(group_id)
    refreshed: set[int] = set()  # guard against duplicate membership rows
    for member in members:
        if not member.workspace_id or member.workspace_id == exclude_workspace_id or member.workspace_id in refreshed:
            continue
        member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context)
        if member_ws:
            builders.refresh_home_tab_for_workspace(member_ws, logger, context=None)
        refreshed.add(member.workspace_id)
+ """ + members = _get_group_members(group_id) + refreshed: set[int] = set() + for member in members: + if not member.workspace_id or member.workspace_id == exclude_workspace_id or member.workspace_id in refreshed: + continue + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + refreshed.add(member.workspace_id) diff --git a/syncbot/handlers/export_import.py b/syncbot/handlers/export_import.py new file mode 100644 index 0000000..45148c0 --- /dev/null +++ b/syncbot/handlers/export_import.py @@ -0,0 +1,827 @@ +"""Backup/Restore and Data Migration handlers (modals and submissions).""" + +import contextlib +import json +import logging +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import helpers +from db import DbManager, schemas +from helpers import export_import as ei +from slack import actions + +_logger = logging.getLogger(__name__) + +# Uploaded JSON (backup / migration) download limits — matches interaction-time budget. +_UPLOAD_DOWNLOAD_TIMEOUT = 10 +_MAX_IMPORT_BYTES = 50 * 1024 * 1024 # 50 MiB + + +def _download_uploaded_file(file_url: str, token: str) -> tuple[str | None, str | None]: + """Download a Slack-hosted uploaded file. Returns ``(utf8_text, None)`` or ``(None, error_message)``.""" + import urllib.error + import urllib.request + + req = urllib.request.Request(file_url, headers={"Authorization": f"Bearer {token}"}) + try: + with urllib.request.urlopen(req, timeout=_UPLOAD_DOWNLOAD_TIMEOUT) as resp: + chunks: list[bytes] = [] + total = 0 + while True: + chunk = resp.read(65536) + if not chunk: + break + total += len(chunk) + if total > _MAX_IMPORT_BYTES: + return None, "Uploaded file exceeds maximum size (50 MB)." 
+ chunks.append(chunk) + raw = b"".join(chunks) + except urllib.error.HTTPError as e: + _logger.exception("upload download HTTP error: %s", e) + return None, "Failed to download the uploaded file." + except TimeoutError as e: + _logger.exception("upload download timed out: %s", e) + return None, "Failed to download the uploaded file." + except OSError as e: + _logger.exception("upload download failed: %s", e) + return None, "Failed to download the uploaded file." + try: + return raw.decode("utf-8"), None + except UnicodeDecodeError as e: + return None, f"Invalid encoding in uploaded file: {e}" + + +def _is_admin(client: WebClient, user_id: str, body: dict) -> bool: + return helpers.is_user_authorized(client, user_id) + + +def _team_id_for_backup_gate(body: dict) -> str | None: + """Slack team_id for primary-workspace backup/restore gating.""" + return ( + helpers.safe_get(body, "team", "id") + or helpers.safe_get(body, "view", "team_id") + or helpers.safe_get(body, "team_id") + ) + + +def _open_dm_channel(client: WebClient, user_id: str) -> str: + """Open (or reopen) a DM with *user_id* and return the channel ID.""" + resp = client.conversations_open(users=[user_id]) + return resp["channel"]["id"] + + +# --------------------------------------------------------------------------- +# Backup/Restore +# --------------------------------------------------------------------------- + + +def handle_backup_restore( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open Backup/Restore modal (admin only).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + from slack import orm + + download_blocks = [ + orm.SectionBlock(label="*Backup*\nSend a JSON backup file as a SyncBot 
DM."), + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":floppy_disk: Send Backup File", + action=actions.CONFIG_BACKUP_DOWNLOAD, + ), + ], + ), + orm.DividerBlock(), + orm.SectionBlock( + label="*Restore*\nUpload a JSON backup file. The integrity of the file will be checked.", + ), + ] + + restore_block = { + "type": "input", + "block_id": actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, + "label": {"type": "plain_text", "text": " "}, + "element": { + "type": "file_input", + "action_id": actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, + "filetypes": ["json"], + "max_files": 1, + }, + } + + view = orm.BlockView(blocks=download_blocks) + modal_blocks = view.as_form_field() + modal_blocks.append(restore_block) + + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_BACKUP_RESTORE_SUBMIT, + "title": {"type": "plain_text", "text": "Backup / Restore"}, + "submit": {"type": "plain_text", "text": "Restore"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": modal_blocks, + }, + ) + + +def handle_backup_download( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Generate backup and send to user's DM (called from modal button).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return + try: + payload = ei.build_full_backup() + json_str = json.dumps(payload, default=ei._json_serializer, indent=2) + dm_channel = _open_dm_channel(client, user_id) + client.files_upload_v2( + content=json_str, + filename=f"syncbot-backup-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", + channel=dm_channel, + initial_comment=":nerd_face: Here is your SyncBot JSON backup. 
Keep this file secure.", + ) + except Exception as e: + _logger.exception("backup_download failed: %s", e) + return + + view_id = helpers.safe_get(body, "view", "id") + if view_id: + with contextlib.suppress(Exception): + client.views_update( + view_id=view_id, + view={ + "type": "modal", + "title": {"type": "plain_text", "text": "Backup / Restore"}, + "close": {"type": "plain_text", "text": "Close"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":white_check_mark: *Backup Sent!*\n\nCheck your SyncBot DMs to download the backup file.", + }, + }, + ], + }, + ) + + +def handle_backup_restore_submit_ack( + body: dict, + client: WebClient, + context: dict, +) -> dict | None: + """Ack phase: validate upload; return errors, push confirm modal, or ``None`` to close.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return None + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return None + + values = helpers.safe_get(body, "view", "state", "values") or {} + file_data = helpers.safe_get( + values, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT + ) + files = file_data.get("files") if file_data else None + + if not files: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Upload a JSON backup file to restore."}, + } + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "Could not retrieve the uploaded file."}, + } + + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: dl_err}, + } + + try: + data = json.loads(json_text) + except json.JSONDecodeError 
as e: + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Invalid JSON in uploaded file: {e}"}, + } + + if data.get("version") != ei.BACKUP_VERSION: + return { + "response_action": "errors", + "errors": { + actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: f"Unsupported backup version (expected {ei.BACKUP_VERSION})." + }, + } + + hmac_ok = ei.verify_backup_hmac(data) + key_ok = ei.verify_backup_encryption_key(data) + + if not hmac_ok or not key_ok: + from helpers._cache import _cache_set + + cache_key = f"restore_pending:{user_id}" + _cache_set(cache_key, data, ttl=600) + return { + "response_action": "push", + "view": { + "type": "modal", + "title": {"type": "plain_text", "text": "Confirm Restore"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ( + ( + "*WARNING: Integrity Check Failed!* The file has been tampered with. Only proceed if you intentionally edited the file.\n\n" + if not hmac_ok + else "" + ) + + ( + "*WARNING: Encryption Key Mismatch!* Restored bot tokens will not be usable. Workspaces will have to reinstall the app.\n\n" + if not key_ok + else "" + ) + + "Do you want to proceed with the restore anyway?" 
+ ), + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Proceed Anyway"}, + "style": "danger", + "action_id": actions.CONFIG_BACKUP_RESTORE_PROCEED, + "value": user_id, + }, + ], + }, + ], + }, + } + + return None + + +def handle_backup_restore_submit_work( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Lazy work phase: run restore after modal closed (happy path).""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + file_data = helpers.safe_get( + values, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT, actions.CONFIG_BACKUP_RESTORE_JSON_INPUT + ) + files = file_data.get("files") if file_data else None + if not files: + return + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return + + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: + return + + try: + data = json.loads(json_text) + except json.JSONDecodeError: + return + + if data.get("version") != ei.BACKUP_VERSION: + return + + hmac_ok = ei.verify_backup_hmac(data) + key_ok = ei.verify_backup_encryption_key(data) + if not hmac_ok or not key_ok: + return + + _do_restore(data, client, user_id) + + +def handle_backup_restore_proceed( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Proceed with restore after user clicked the danger button despite warnings.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + if not helpers.is_backup_visible_for_workspace(_team_id_for_backup_gate(body)): + return + from helpers._cache import 
_cache_get + + data = _cache_get(f"restore_pending:{user_id}") + if not data: + _logger.warning("backup_restore_proceed: restore data expired for user %s", user_id) + return + _do_restore(data, client, user_id) + + +def _do_restore(data: dict, client: WebClient, user_id: str) -> None: + """Run restore, invalidate caches, and refresh the Home tab for all restored workspaces.""" + try: + team_ids = ei.restore_full_backup(data, skip_hmac_check=True, skip_encryption_key_check=True) + ei.invalidate_home_tab_caches_for_all_teams(team_ids) + except Exception as e: + _logger.exception("restore failed: %s", e) + raise + + for team_id in team_ids: + workspace_rows = DbManager.find_records(schemas.Workspace, [schemas.Workspace.team_id == team_id]) + if workspace_rows: + try: + builders.refresh_home_tab_for_workspace(workspace_rows[0], _logger) + except Exception as e: + _logger.warning("_do_restore: failed to refresh home tab for %s: %s", team_id, e) + + +# --------------------------------------------------------------------------- +# Data Migration +# --------------------------------------------------------------------------- + + +def handle_data_migration( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open Data Migration modal (admin only, federation enabled).""" + if not constants.FEDERATION_ENABLED: + return + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + from slack import orm + + export_blocks = [ + orm.SectionBlock( + label="*Export*\nDownload your Workspace data for migration to another instance. 
You will receive a JSON file in your DM.", + ), + orm.ActionsBlock( + elements=[ + orm.ButtonElement( + label=":outbox_tray: Export my Workspace data", + action=actions.CONFIG_DATA_MIGRATION_EXPORT, + ), + ], + ), + orm.DividerBlock(), + orm.SectionBlock( + label="*Import*\nUpload a migration JSON file. Existing Sync Channels in the federated Group will be replaced.", + ), + ] + + import_block = { + "type": "input", + "block_id": actions.CONFIG_DATA_MIGRATION_JSON_INPUT, + "label": {"type": "plain_text", "text": " "}, + "element": { + "type": "file_input", + "action_id": actions.CONFIG_DATA_MIGRATION_JSON_INPUT, + "filetypes": ["json"], + "max_files": 1, + }, + } + + view = orm.BlockView(blocks=export_blocks) + modal_blocks = view.as_form_field() + modal_blocks.append(import_block) + + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_DATA_MIGRATION_SUBMIT, + "title": {"type": "plain_text", "text": "Data Migration"}, + "submit": {"type": "plain_text", "text": "Import"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": modal_blocks, + }, + ) + + +def handle_data_migration_export( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Export workspace migration JSON and send to user's DM.""" + if not constants.FEDERATION_ENABLED: + return + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "team_id") + if not _is_admin(client, user_id, body): + return + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + try: + payload = ei.build_migration_export(workspace_record.id, include_source_instance=True) + json_str = json.dumps(payload, default=ei._json_serializer, indent=2) + dm_channel = _open_dm_channel(client, user_id) + client.files_upload_v2( + content=json_str, + 
filename=f"syncbot-migration-{workspace_record.team_id}-{datetime.now(UTC).strftime('%Y%m%d-%H%M%S')}.json", + channel=dm_channel, + initial_comment="Your SyncBot workspace migration file. Use it on the new instance after connecting via federation.", + ) + except Exception as e: + _logger.exception("data_migration_export failed: %s", e) + + +def _data_migration_prepare( + body: dict, + client: WebClient, + context: dict, +) -> tuple[dict | None, dict | None, int | None, dict[str, int] | None, object | None]: + """Shared validation for migration ack/work. + + Returns ``(error_ack_dict, data, group_id, team_id_to_workspace_id, workspace_record)``. + """ + if not constants.FEDERATION_ENABLED: + return None, None, None, None, None + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") + if not _is_admin(client, user_id, body): + return None, None, None, None, None + + values = helpers.safe_get(body, "view", "state", "values") or {} + file_data = helpers.safe_get( + values, actions.CONFIG_DATA_MIGRATION_JSON_INPUT, actions.CONFIG_DATA_MIGRATION_JSON_INPUT + ) + files = file_data.get("files") if file_data else None + + if not files: + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Upload a migration JSON file to import."}, + }, + None, + None, + None, + None, + ) + + file_info = files[0] + file_url = file_info.get("url_private_download") or file_info.get("url_private") + if not file_url: + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Could not retrieve the uploaded file."}, + }, + None, + None, + None, + None, + ) + + json_text, dl_err = _download_uploaded_file(file_url, client.token) + if dl_err: + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: dl_err}, + }, + None, + None, + None, + None, + 
) + + try: + data = json.loads(json_text) + except json.JSONDecodeError as e: + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Invalid JSON in uploaded file: {e}"}, + }, + None, + None, + None, + None, + ) + + if data.get("version") != ei.MIGRATION_VERSION: + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: f"Unsupported migration version (expected {ei.MIGRATION_VERSION})." + }, + }, + None, + None, + None, + None, + ) + + workspace_payload = data.get("workspace", {}) + export_team_id = workspace_payload.get("team_id") + if not export_team_id: + return ( + { + "response_action": "errors", + "errors": {actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "Migration file missing workspace.team_id."}, + }, + None, + None, + None, + None, + ) + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record or workspace_record.team_id != export_team_id: + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "This migration file is for a different workspace. Open the app from the workspace that matches the migration file." 
+ }, + }, + None, + None, + None, + None, + ) + + team_id_to_workspace_id = {workspace_record.team_id: workspace_record.id} + workspaces_b = DbManager.find_records(schemas.Workspace, [schemas.Workspace.deleted_at.is_(None)]) + for w in workspaces_b: + if w.team_id: + team_id_to_workspace_id[w.team_id] = w.id + + source = data.get("source_instance") + if source and source.get("connection_code"): + import secrets + + from federation import core as federation + + result = federation.initiate_federation_connect( + source["webhook_url"], + source["connection_code"], + team_id=workspace_record.team_id, + workspace_name=workspace_record.workspace_name or None, + ) + if result and result.get("ok"): + fed_ws = federation.get_or_create_federated_workspace( + instance_id=source["instance_id"], + webhook_url=source["webhook_url"], + public_key=source["public_key"], + name=f"Connection {source['instance_id'][:8]}", + ) + my_groups = helpers.get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id == fed_ws.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + found = False + for fm in fed_members: + if fm.group_id in my_group_ids: + found = True + break + if not found: + now = datetime.now(UTC) + new_group = schemas.WorkspaceGroup( + name=f"Federation — {fed_ws.name}", + invite_code=f"FED-{secrets.token_hex(4).upper()}", + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(new_group) + DbManager.create_record( + schemas.WorkspaceGroupMember( + group_id=new_group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + ) + DbManager.create_record( + schemas.WorkspaceGroupMember( + group_id=new_group.id, + federated_workspace_id=fed_ws.id, + 
status="active", + role="member", + joined_at=now, + ) + ) + + my_groups = helpers.get_groups_for_workspace(workspace_record.id) + my_group_ids = {g.id for g, _ in my_groups} + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.federated_workspace_id.isnot(None), + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + candidate_groups = [fm.group_id for fm in fed_members if fm.group_id in my_group_ids] + group_id = candidate_groups[0] if candidate_groups else None + if not group_id: + return ( + { + "response_action": "errors", + "errors": { + actions.CONFIG_DATA_MIGRATION_JSON_INPUT: "No federation connection found. Connect to the other instance first (Enter Connection Code), then import." + }, + }, + None, + None, + None, + None, + ) + + return None, data, group_id, team_id_to_workspace_id, workspace_record + + +def handle_data_migration_submit_ack( + body: dict, + client: WebClient, + context: dict, +) -> dict | None: + """Ack phase: validate; return errors, push confirm, or ``None`` to close before lazy import.""" + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + err, data, group_id, team_id_to_workspace_id, workspace_record = _data_migration_prepare(body, client, context) + if err is not None: + return err + if data is None or group_id is None or team_id_to_workspace_id is None or workspace_record is None: + return None + + source = data.get("source_instance") + sig_ok = ei.verify_migration_signature(data) + if not sig_ok and source: + from helpers._cache import _cache_set + + cache_key = f"migration_import_pending:{user_id}" + _cache_set( + cache_key, + { + "data": data, + "group_id": group_id, + "workspace_id": workspace_record.id, + "team_id_to_workspace_id": team_id_to_workspace_id, + }, + ttl=600, + ) + return { + "response_action": "push", + "view": { + "type": "modal", + "title": {"type": "plain_text", 
"text": "Confirm Import"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Integrity check failed.* The file may have been modified or could be malicious. Only proceed if you intentionally edited the file.\n\nProceed with import anyway?", + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Proceed Anyway"}, + "style": "danger", + "action_id": actions.CONFIG_DATA_MIGRATION_PROCEED, + "value": user_id, + }, + ], + }, + ], + }, + } + + return None + + +def handle_data_migration_submit_work( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Lazy work phase: import migration data after modal closed.""" + if not constants.FEDERATION_ENABLED: + return + err, data, group_id, team_id_to_workspace_id, workspace_record = _data_migration_prepare(body, client, context) + if err is not None or data is None or group_id is None or team_id_to_workspace_id is None or workspace_record is None: + return + + source = data.get("source_instance") + sig_ok = ei.verify_migration_signature(data) + if not sig_ok and source: + return + + ei.import_migration_data( + data, + workspace_record.id, + group_id, + team_id_to_workspace_id=team_id_to_workspace_id, + ) + ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) + + +def handle_data_migration_proceed( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Proceed with import after user clicked the danger button despite warnings.""" + if not constants.FEDERATION_ENABLED: + return + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not _is_admin(client, user_id, body): + return + from helpers._cache import _cache_get + + meta = _cache_get(f"migration_import_pending:{user_id}") + if not meta: + _logger.warning("data_migration_proceed: import data expired for user %s", 
user_id) + return + data = meta.get("data") + group_id = meta.get("group_id") + workspace_id = meta.get("workspace_id") + team_id_to_workspace_id = meta.get("team_id_to_workspace_id", {}) + if not data or not group_id or not workspace_id: + return + + workspace_record = DbManager.get_record(schemas.Workspace, workspace_id) + if not workspace_record: + return + + ei.import_migration_data( + data, + workspace_record.id, + group_id, + team_id_to_workspace_id=team_id_to_workspace_id, + ) + ei.invalidate_home_tab_caches_for_team(workspace_record.team_id) diff --git a/syncbot/handlers/federation_cmds.py b/syncbot/handlers/federation_cmds.py new file mode 100644 index 0000000..e0dd65d --- /dev/null +++ b/syncbot/handlers/federation_cmds.py @@ -0,0 +1,363 @@ +"""Federation command handlers — code generation, entry, and connection via Slack UI.""" + +import logging +import secrets +from datetime import UTC, datetime, timedelta +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import federation +import helpers +from db import DbManager, schemas +from slack import actions, orm + +_logger = logging.getLogger(__name__) + + +def _exchange_user_directory( + fed_ws: schemas.FederatedWorkspace, + workspace_record: schemas.Workspace, +) -> None: + """Push our local user directory to a federated workspace and store theirs.""" + local_users = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_record.id], + ) + users_payload = [ + { + "user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + } + for u in local_users + ] + + result = federation.push_users( + fed_ws, + { + "users": users_payload, + "workspace_id": workspace_record.id, + }, + ) + + if result and result.get("users"): + remote_users = result["users"] + now = datetime.now(UTC) + for u in remote_users: + remote_ws_id = u.get("workspace_id") + if not remote_ws_id: + 
continue + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == remote_ws_id, + schemas.UserDirectory.slack_user_id == u.get("user_id", ""), + ], + ) + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: u.get("email"), + schemas.UserDirectory.real_name: u.get("real_name"), + schemas.UserDirectory.display_name: u.get("display_name"), + schemas.UserDirectory.updated_at: now, + }, + ) + else: + record = schemas.UserDirectory( + workspace_id=remote_ws_id, + slack_user_id=u.get("user_id", ""), + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + updated_at=now, + ) + DbManager.create_record(record) + + _logger.info( + "federation_user_exchange_complete", + extra={"remote": fed_ws.instance_id, "sent": len(users_payload), "received": len(remote_users)}, + ) + + +def handle_generate_federation_code( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal asking for a label before generating the connection code.""" + if not constants.FEDERATION_ENABLED: + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + blocks = [ + orm.InputBlock( + label="Name for this connection", + action=actions.CONFIG_FEDERATION_LABEL_INPUT, + element=orm.PlainTextInputElement( + placeholder="e.g. 
East Coast SyncBot, Partner Org...", + ), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Give this connection a friendly name so you can identify it later.", + ), + ), + ] + + view = orm.BlockView(blocks=blocks) + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_FEDERATION_LABEL_SUBMIT, + "title": {"type": "plain_text", "text": "New Connection"}, + "submit": {"type": "plain_text", "text": "Generate Code"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": view.as_form_field(), + }, + ) + + +def handle_federation_label_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Generate the connection code after the admin provides a label.""" + if not constants.FEDERATION_ENABLED: + return + + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + public_url = federation.get_public_url() + if not public_url: + _logger.warning("federation_no_public_url") + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + label = "" + for block_data in values.values(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_FEDERATION_LABEL_INPUT: + label = (action_data.get("value") or "").strip() + + encoded, raw_code = federation.generate_federation_code(workspace_record.id, label=label or None) + + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if user_id: + try: + dm = client.conversations_open(users=[user_id]) + dm_channel = helpers.safe_get(dm, "channel", "id") + if dm_channel: + expires_ts = int((datetime.now(UTC) + timedelta(hours=24)).timestamp()) + client.chat_postMessage( + channel=dm_channel, + text=":globe_with_meridians: *Connection Code Generated*" + + (f" — _{label}_" if 
label else "") + + f"\n\nShare this code with the admin of the other SyncBot instance:\n\n```{encoded}```" + + f"\nThis code expires <!date^{expires_ts}^{{date_short_pretty}} {{time}}|in 24 hours>.", + ) + except Exception as e: + _logger.warning(f"Failed to DM connection code: {e}") + + # NOTE(review): raw_code is a shared secret — consider redacting it from log output. + _logger.info( + "federation_code_generated", + extra={"workspace_id": workspace_record.id, "code": raw_code, "label": label}, + ) + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_enter_federation_code( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for the admin to paste a federation code.""" + if not constants.FEDERATION_ENABLED: + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + blocks = [ + orm.InputBlock( + label="Paste the connection code from the remote SyncBot instance", + action=actions.CONFIG_FEDERATION_CODE_INPUT, + element=orm.PlainTextInputElement( + placeholder="Paste the full code here...", + multiline=True, + ), + ), + ] + + view = orm.BlockView(blocks=blocks) + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "callback_id": actions.CONFIG_FEDERATION_CODE_SUBMIT, + "title": {"type": "plain_text", "text": "Enter Connection Code"}, + "submit": {"type": "plain_text", "text": "Connect"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": view.as_form_field(), + }, + ) + + +def handle_federation_code_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Process a submitted federation code and initiate cross-instance connection.""" + if not constants.FEDERATION_ENABLED: + return + + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + code_text = "" + for _block_id, block_data in
values.items(): + for action_id, action_data in block_data.items(): + if action_id == actions.CONFIG_FEDERATION_CODE_INPUT: + code_text = (action_data.get("value") or "").strip() + + if not code_text: + _logger.warning("federation_code_submit: empty code") + return + + payload = federation.parse_federation_code(code_text) + if not payload: + _logger.warning("federation_code_submit: invalid code format") + return + + remote_url = payload["webhook_url"] + remote_code = payload["code"] + remote_instance_id = payload["instance_id"] + + result = federation.initiate_federation_connect( + remote_url, + remote_code, + team_id=workspace_record.team_id, + workspace_name=workspace_record.workspace_name or None, + ) + if not result or not result.get("ok"): + _logger.error( + "federation_connect_failed", + extra={"remote_url": remote_url, "result": result}, + ) + return + + remote_public_key = result.get("public_key", "") + + fed_ws = federation.get_or_create_federated_workspace( + instance_id=remote_instance_id, + webhook_url=remote_url, + public_key=remote_public_key, + name=f"Connection {remote_instance_id[:8]}", + ) + + now = datetime.now(UTC) + group = schemas.WorkspaceGroup( + name=f"Federation — {fed_ws.name}", + invite_code=f"FED-{secrets.token_hex(4).upper()}", + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(group) + + local_member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(local_member) + + fed_member = schemas.WorkspaceGroupMember( + group_id=group.id, + federated_workspace_id=fed_ws.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(fed_member) + + _logger.info( + "federation_connection_established", + extra={ + "workspace_id": workspace_record.id, + "remote_instance": remote_instance_id, + "federated_workspace_id": fed_ws.id, + 
"group_id": group.id, + }, + ) + + _exchange_user_directory(fed_ws, workspace_record) + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_remove_federation_connection( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Remove a federation connection (group membership).""" + action_data = helpers.safe_get(body, "actions", 0) or {} + action_id: str = action_data.get("action_id", "") + member_id_str = action_id.replace(f"{actions.CONFIG_REMOVE_FEDERATION_CONNECTION}_", "") + + try: + member_id = int(member_id_str) + except (TypeError, ValueError): + _logger.warning("remove_federation_connection_invalid_id", extra={"action_id": action_id}) + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member: + return + + from datetime import UTC, datetime + now = datetime.now(UTC) + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + { + schemas.WorkspaceGroupMember.status: "inactive", + schemas.WorkspaceGroupMember.deleted_at: now, + }, + ) + + _logger.info("federation_connection_removed", extra={"member_id": member_id}) + + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) if team_id else None + if workspace_record: + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/handlers/group_manage.py b/syncbot/handlers/group_manage.py new file mode 100644 index 0000000..1f279f4 --- /dev/null +++ b/syncbot/handlers/group_manage.py @@ -0,0 +1,210 @@ +"""Group management handlers — leave group with confirmation.""" + +import logging +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import helpers +from db import DbManager, schemas +from slack import actions, orm + +_logger = 
logging.getLogger(__name__) + + +def handle_leave_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Show a confirmation modal before leaving a workspace group.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "leave_group"}) + return + + action_data = helpers.safe_get(body, "actions", 0) or {} + action_id: str = action_data.get("action_id", "") + group_id_str = action_id.replace(f"{actions.CONFIG_LEAVE_GROUP}_", "") + + try: + group_id = int(group_id_str) + except (TypeError, ValueError): + _logger.warning("leave_group_invalid_id", extra={"action_id": action_id}) + return + + groups = DbManager.find_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + if not groups: + return + group = groups[0] + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + confirm_form = orm.BlockView( + blocks=[ + orm.SectionBlock( + label=( + f":warning: *Are you sure you want to leave the group \"{group.name}\"?*\n\n" + "This will:\n" + "\u2022 Stop all channel syncs you have in this group\n" + "\u2022 Remove your synced message history from this group\n" + "\u2022 Remove your user mappings for this group\n\n" + "_Other members will continue syncing uninterrupted._" + ), + ), + ] + ) + + confirm_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_LEAVE_GROUP_CONFIRM, + title_text="Leave Group", + submit_button_text="Leave", + close_button_text="Cancel", + parent_metadata={"group_id": group_id}, + ) + + +def handle_leave_group_confirm( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Execute group departure after confirmation. 
+ + - Soft-deletes the membership record + - Removes this workspace's SyncChannels (and their PostMeta) for group syncs + - Leaves all affected Slack channels + - Cleans up syncs this workspace published (if all subscribers are gone) + - Removes user mappings scoped to this group + - Notifies remaining group members + """ + from handlers._common import _parse_private_metadata + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "leave_group_confirm"}) + return + + meta = _parse_private_metadata(body) + group_id = meta.get("group_id") + if not group_id: + _logger.warning("leave_group_confirm: missing group_id in metadata") + return + + team_id = helpers.safe_get(body, "view", "team_id") + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + groups = DbManager.find_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + if not groups: + return + group = groups[0] + + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + if not members: + _logger.warning("leave_group_confirm: not a member", extra={"group_id": group_id}) + return + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + syncs_in_group = DbManager.find_records(schemas.Sync, [schemas.Sync.group_id == group_id]) + + for sync in syncs_in_group: + my_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync.id, + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for ch in 
my_channels: + DbManager.delete_records(schemas.PostMeta, [schemas.PostMeta.sync_channel_id == ch.id]) + DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == ch.id]) + try: + client.conversations_leave(channel=ch.channel_id) + except Exception as e: + _logger.warning(f"Failed to leave channel {ch.channel_id}: {e}") + + if sync.publisher_workspace_id == workspace_record.id: + remaining = DbManager.find_records( + schemas.SyncChannel, + [schemas.SyncChannel.sync_id == sync.id, schemas.SyncChannel.deleted_at.is_(None)], + ) + if not remaining: + DbManager.delete_records(schemas.Sync, [schemas.Sync.id == sync.id]) + + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.group_id == group_id, + ( + (schemas.UserMapping.source_workspace_id == workspace_record.id) + | (schemas.UserMapping.target_workspace_id == workspace_record.id) + ), + ], + ) + + from datetime import UTC, datetime + now = datetime.now(UTC) + for member in members: + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member.id], + { + schemas.WorkspaceGroupMember.status: "inactive", + schemas.WorkspaceGroupMember.deleted_at: now, + }, + ) + + _logger.info( + "group_left", + extra={"workspace_id": workspace_record.id, "group_id": group_id, "group_name": group.name}, + ) + + remaining_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + if not remaining_members: + DbManager.delete_records(schemas.WorkspaceGroup, [schemas.WorkspaceGroup.id == group_id]) + _logger.info("group_deleted_empty", extra={"group_id": group_id}) + else: + for member in remaining_members: + if not member.workspace_id: + continue + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + 
continue + try: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers.notify_admins_dm( + member_client, + f":wave: *{admin_label}* left the group *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + except Exception as e: + _logger.warning(f"Failed to notify group member {member.workspace_id}: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/handlers/groups.py b/syncbot/handlers/groups.py new file mode 100644 index 0000000..c9b2dca --- /dev/null +++ b/syncbot/handlers/groups.py @@ -0,0 +1,789 @@ +"""Workspace group handlers — create, join, accept, cancel.""" + +import contextlib +import logging +import secrets +import string +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import helpers +from db import DbManager, schemas +from handlers._common import ( + _get_authorized_workspace, + _get_selected_option_value, + _get_text_input_value, + _parse_private_metadata, +) +from slack import actions, forms, orm +from slack.blocks import context as block_context +from slack.blocks import divider, section + +_logger = logging.getLogger(__name__) + +_INVITE_CODE_CHARS = string.ascii_uppercase + string.digits + + +def _generate_invite_code(length: int = 7) -> str: + """Generate a random alphanumeric invite code like ``A7X-K9M``.""" + raw = "".join(secrets.choice(_INVITE_CODE_CHARS) for _ in range(length)) + return f"{raw[:3]}-{raw[3:]}" if length >= 6 else raw + + +def _activate_group_membership( + client: WebClient, + workspace_record: "schemas.Workspace", + group: "schemas.WorkspaceGroup", +) -> None: + """Refresh user directories and seed mappings for all existing group members.""" + try: + helpers._refresh_user_directory(client, workspace_record.id) + except Exception as e: + _logger.warning(f"Failed to refresh user directory for workspace 
{workspace_record.id}: {e}") + + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + + member_clients: list[tuple[WebClient, int]] = [] + + for member in members: + if not member.workspace_id: + continue + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + continue + + try: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers._refresh_user_directory(member_client, member_ws.id) + member_clients.append((member_client, member_ws.id)) + except Exception as e: + _logger.warning(f"Failed to refresh user directory for workspace {member_ws.id}: {e}") + + try: + helpers.seed_user_mappings(workspace_record.id, member_ws.id, group_id=group.id) + helpers.seed_user_mappings(member_ws.id, workspace_record.id, group_id=group.id) + except Exception as e: + _logger.warning(f"Failed to seed user mappings: {e}") + + try: + helpers.run_auto_match_for_workspace(client, workspace_record.id) + except Exception as e: + _logger.warning(f"Auto-match failed for workspace {workspace_record.id}: {e}") + + for member_client, member_ws_id in member_clients: + try: + helpers.run_auto_match_for_workspace(member_client, member_ws_id) + except Exception as e: + _logger.warning(f"Auto-match failed for member workspace {member_ws_id}: {e}") + + +def handle_create_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for naming a new workspace group.""" + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "create_group"}) + return + + 
trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + view = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Workspace Group Name", + action=actions.CONFIG_CREATE_GROUP_NAME, + element=orm.PlainTextInputElement(placeholder="e.g. Slack Syndicate, The Multiverse..."), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="_Give this Workspace Group a friendly and descriptive name._", + ), + ), + ] + ) + + view.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_CREATE_GROUP_SUBMIT, + title_text="Create Group", + submit_button_text="Create Group", + ) + + +def handle_create_group_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Create the workspace group and add this workspace as the creator.""" + auth_result = _get_authorized_workspace(body, client, context, "create_group_submit") + if not auth_result: + return + user_id, workspace_record = auth_result + + group_name = (_get_text_input_value(body, actions.CONFIG_CREATE_GROUP_NAME) or "").strip() + + if not group_name: + _logger.warning("create_group_submit: empty group name") + return + + if len(group_name) > 100: + group_name = group_name[:100] + + code = _generate_invite_code() + now = datetime.now(UTC) + + group = schemas.WorkspaceGroup( + name=group_name, + invite_code=code, + status="active", + created_at=now, + created_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(group) + + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="creator", + joined_at=now, + ) + DbManager.create_record(member) + + _logger.info( + "group_created", + extra={ + "workspace_id": workspace_record.id, + "group_id": group.id, + "group_name": group_name, + "invite_code": code, + }, + ) + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + if acting_user_id: + try: + dm = 
client.conversations_open(users=[acting_user_id]) + dm_channel = helpers.safe_get(dm, "channel", "id") + if dm_channel: + client.chat_postMessage( + channel=dm_channel, + text=f":raised_hands: *New Workspace Group Created!*\n\n*Group Name:* `{group_name}`\n\n*Invite Code:* `{code}`\n\n" + "You can share the Invite Code with an Admin from another Workspace and they can join the Group.", + ) + except Exception as e: + _logger.warning(f"Failed to DM invite code: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_join_group( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for entering a group invite code.""" + import copy + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_group"}) + return + + trigger_id = helpers.safe_get(body, "trigger_id") + enter_form = copy.deepcopy(forms.ENTER_GROUP_CODE_FORM) + enter_form.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_JOIN_GROUP_SUBMIT, + title_text="Join Group", + new_or_add="new", + ) + + +def handle_join_group_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Validate an invite code and join the workspace group.""" + auth_result = _get_authorized_workspace(body, client, context, "join_group_submit") + if not auth_result: + return + user_id, workspace_record = auth_result + + form_data = forms.ENTER_GROUP_CODE_FORM.get_selected_values(body) + raw_code = (helpers.safe_get(form_data, actions.CONFIG_JOIN_GROUP_CODE) or "").strip().upper() + + if "-" not in raw_code and len(raw_code) >= 6: + raw_code = f"{raw_code[:3]}-{raw_code[3:]}" + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + + rate_key = f"group_join_attempts:{workspace_record.id}" + attempts = 
helpers._cache_get(rate_key) or 0 + if attempts >= 5: + _logger.warning("group_join_rate_limited", extra={"workspace_id": workspace_record.id}) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + groups = DbManager.find_records( + schemas.WorkspaceGroup, + [ + schemas.WorkspaceGroup.invite_code == raw_code, + schemas.WorkspaceGroup.status == "active", + ], + ) + + if not groups: + helpers._cache_set(rate_key, attempts + 1, ttl=900) + _logger.warning( + "group_code_invalid", + extra={ + "workspace_id": workspace_record.id, + "attempt": attempts + 1, + "code_length": len(raw_code), + }, + ) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + group = groups[0] + + if group.created_by_workspace_id == workspace_record.id: + _logger.warning("group_self_join", extra={"workspace_id": workspace_record.id}) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + existing = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + if existing: + _logger.info("group_already_member", extra={"workspace_id": workspace_record.id, "group_id": group.id}) + builders.build_home_tab(body, client, logger, context, user_id=acting_user_id) + return + + now = datetime.now(UTC) + member = schemas.WorkspaceGroupMember( + group_id=group.id, + workspace_id=workspace_record.id, + status="active", + role="member", + joined_at=now, + ) + DbManager.create_record(member) + + _logger.info( + "group_joined", + extra={ + "workspace_id": workspace_record.id, + "group_id": group.id, + "group_name": group.name, + }, + ) + + _activate_group_membership(client, workspace_record, group) + + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + other_members 
= DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + for other_member in other_members: + if not other_member.workspace_id: + continue + member_ws = helpers.get_workspace_by_id(other_member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + continue + try: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers.notify_admins_dm( + member_client, + f":punch: *{admin_label}* joined the Workspace Group called *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + except Exception as e: + _logger.warning(f"Failed to notify group member {other_member.workspace_id}: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +# --------------------------------------------------------------------------- +# Invite workspace to group +# --------------------------------------------------------------------------- + + +def handle_invite_workspace( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a modal for inviting a workspace to a group.""" + import constants + + auth_result = _get_authorized_workspace(body, client, context, "invite_workspace") + if not auth_result: + return + _, workspace_record = auth_result + + trigger_id = helpers.safe_get(body, "trigger_id") + raw_group_id = helpers.safe_get(body, "actions", 0, "value") + try: + group_id = int(raw_group_id) + except (TypeError, ValueError): + _logger.warning(f"invite_workspace: invalid group_id: {raw_group_id!r}") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=group_id) + if not group: + return + + current_workspace_id = workspace_record.id if workspace_record else 
None + + # Only active members count as "already in the group"; pending invites can be re-invited + current_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + member_ws_ids = {member.workspace_id for member in current_members if member.workspace_id} + + all_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.deleted_at.is_(None)], + ) + eligible = [ws for ws in all_workspaces if ws.id not in member_ws_ids and ws.bot_token] + + # Show Oops only when there are no other installed workspaces at all (not when everyone is already in the group) + other_installed = [ws for ws in all_workspaces if ws.bot_token and ws.id != current_workspace_id] + if not other_installed and not constants.FEDERATION_ENABLED: + msg_blocks = [ + section( + "At least one other Slack Workspace needs to install this SyncBot app, or " + "External Connections need to be allowed, before you can invite another Workspace to this Group." 
+ ), + ] + orm.BlockView(blocks=msg_blocks).post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_INVITE_WORKSPACE_SUBMIT, + title_text="Oops!", + submit_button_text=None, + new_or_add="new", + ) + return + + modal_blocks: list = [] + + if eligible: + workspace_options = [ + orm.SelectorOption( + name=helpers.resolve_workspace_name(workspace), + value=str(workspace.id), + ) + for workspace in eligible + ] + modal_blocks.append( + orm.InputBlock( + label="Send a SyncBot DM", + action=actions.CONFIG_INVITE_WORKSPACE_SELECT, + element=orm.StaticSelectElement( + placeholder="Select a Workspace", + options=workspace_options, + ), + optional=True, + ) + ) + modal_blocks.append( + block_context( + "A SyncBot DM will be sent to Admins in the other Workspace.", + ) + ) + + modal_blocks.append(block_context("\u200b")) + modal_blocks.append(divider()) + modal_blocks.append(section(":memo: *Invite Code*")) + modal_blocks.append( + block_context( + f"Alternatively, share this Invite Code with an Admin from another Workspace:\n\n`{group.invite_code}`" + ) + ) + + if constants.FEDERATION_ENABLED: + modal_blocks.append(divider()) + modal_blocks.append(section(":globe_with_meridians: *External Workspace*")) + modal_blocks.append( + block_context( + "For Workspaces running their own external SyncBot instance, " + f"share this Invite Code for them to join:\n\n`{group.invite_code}`" + ) + ) + + submit_text = "Send Invite" if eligible else None + view = orm.BlockView(blocks=modal_blocks) + view.post_modal( + client=client, + trigger_id=trigger_id, + callback_id=actions.CONFIG_INVITE_WORKSPACE_SUBMIT, + title_text="Invite Workspace", + submit_button_text=submit_text, + parent_metadata={"group_id": group_id}, + new_or_add="new", + ) + + +def handle_invite_workspace_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Send a DM invite to admins of the selected workspace.""" + auth_result = 
_get_authorized_workspace(body, client, context, "invite_workspace_submit") + if not auth_result: + return + user_id, workspace_record = auth_result + meta = _parse_private_metadata(body) + group_id = meta.get("group_id") + if not group_id: + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=group_id) + if not group: + return + + selected_ws_id = _get_selected_option_value(body, actions.CONFIG_INVITE_WORKSPACE_SELECT) + + if not selected_ws_id: + return + + try: + target_ws_id = int(selected_ws_id) + except (TypeError, ValueError): + return + + target_ws = helpers.get_workspace_by_id(target_ws_id) + if not target_ws or not target_ws.bot_token or target_ws.deleted_at: + _logger.warning(f"invite_workspace_submit: target workspace {target_ws_id} not available") + return + + existing = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.workspace_id == target_ws_id, + ], + ) + if existing: + _logger.info(f"invite_workspace_submit: workspace {target_ws_id} already in group {group_id}") + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + return + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + member = schemas.WorkspaceGroupMember( + group_id=group_id, + workspace_id=target_ws_id, + status="pending", + role="member", + joined_at=None, + invited_by_slack_user_id=acting_user_id, + invited_by_workspace_id=workspace_record.id, + ) + DbManager.create_record(member) + + _, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record) + + target_client = WebClient(token=helpers.decrypt_bot_token(target_ws.bot_token)) + + invite_blocks = [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": f":punch: *{admin_label}* has invited your Workspace to join a SyncBot Group!\n\n*Group Name:* `{group.name}`", + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + 
"text": {"type": "plain_text", "text": "Accept"}, + "style": "primary", + "action_id": f"{actions.CONFIG_ACCEPT_GROUP_REQUEST}_{member.id}", + "value": str(member.id), + }, + { + "type": "button", + "text": {"type": "plain_text", "text": "Decline"}, + "style": "danger", + "action_id": f"{actions.CONFIG_DECLINE_GROUP_REQUEST}_{member.id}", + "value": str(member.id), + }, + ], + }, + ] + + dm_entries = helpers.notify_admins_dm_blocks( + target_client, + f"{admin_label} has invited your Workspace to join a SyncBot Group!\n\n*Group Name:* `{group.name}`", + invite_blocks, + ) + helpers.save_dm_messages_to_group_member(member.id, dm_entries) + + _logger.info( + "group_invite_sent", + extra={ + "group_id": group_id, + "target_workspace_id": target_ws_id, + "member_id": member.id, + }, + ) + + builders.refresh_home_tab_for_workspace(target_ws, logger, context=None) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +# --------------------------------------------------------------------------- +# Accept / Decline group invite +# --------------------------------------------------------------------------- + + +def handle_accept_group_invite( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Accept a pending group invite from a DM button.""" + raw_member_id = helpers.safe_get(body, "actions", 0, "value") + try: + member_id = int(raw_member_id) + except (TypeError, ValueError): + _logger.warning(f"accept_group_invite: invalid member_id: {raw_member_id!r}") + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member or member.status != "pending": + _logger.info(f"accept_group_invite: member {member_id} not pending") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=member.group_id) + if not group: + return + + workspace_record = helpers.get_workspace_by_id(member.workspace_id) + if not workspace_record: + return + + now = datetime.now(UTC) + 
DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + { + schemas.WorkspaceGroupMember.status: "active", + schemas.WorkspaceGroupMember.joined_at: now, + }, + ) + + _activate_group_membership(client, workspace_record, group) + + _update_invite_dms( + member, + workspace_record, + f"Your Workspace has joined the SyncBot Group called *{group.name}*.", + ) + + other_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + ws_name = helpers.resolve_workspace_name(workspace_record) + for other_member in other_members: + if not other_member.workspace_id: + continue + member_ws = helpers.get_workspace_by_id(other_member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + continue + try: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers.notify_admins_dm( + member_client, + f":punch: *{ws_name}* has joined the Workspace Group called *{group.name}*.", + ) + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + except Exception as e: + _logger.warning(f"Failed to notify group member {other_member.workspace_id}: {e}") + + _logger.info( + "group_invite_accepted", + extra={ + "member_id": member_id, + "group_id": group.id, + "workspace_id": workspace_record.id, + }, + ) + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_decline_group_invite( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handle Decline (invited workspace) or Cancel Invite (inviting workspace) for a pending group invite.""" + raw_member_id = helpers.safe_get(body, "actions", 0, "value") + try: + member_id = int(raw_member_id) + 
except (TypeError, ValueError): + _logger.warning(f"decline_group_invite: invalid member_id: {raw_member_id!r}") + return + + member = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not member or member.status != "pending": + _logger.info(f"decline_group_invite: member {member_id} not pending") + return + + group = DbManager.get_record(schemas.WorkspaceGroup, id=member.group_id) + group_name = group.name if group else "the group" + + action_id = helpers.safe_get(body, "actions", 0, "action_id") or "" + is_cancel = action_id.startswith(actions.CONFIG_CANCEL_GROUP_REQUEST) + outcome = "canceled" if is_cancel else "declined" + + target_ws = helpers.get_workspace_by_id(member.workspace_id) if member.workspace_id else None + + _update_invite_dms( + member, + target_ws, + f":x: The invitation to join *{group_name}* was {outcome}.", + ) + + group_id = member.group_id + + DbManager.delete_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + ) + + _logger.info( + "group_invite_declined", + extra={"member_id": member_id, "group_id": group_id}, + ) + + all_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for member in all_members: + if not member.workspace_id: + continue + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + continue + with contextlib.suppress(Exception): + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + + if target_ws and target_ws.bot_token and not target_ws.deleted_at: + with contextlib.suppress(Exception): + builders.refresh_home_tab_for_workspace(target_ws, logger, context=None) + + +def _update_invite_dms( + member: schemas.WorkspaceGroupMember, + workspace: schemas.Workspace | None, + new_text: str, 
+) -> None: + """Replace the original invite DM content with an updated message so the invite + is removed and replaced by the success message (e.g. workspace joined the group). + """ + import json as _json + + if not member.dm_messages: + _logger.debug("_update_invite_dms: no dm_messages on member %s", member.id) + return + if not workspace or not workspace.bot_token: + return + + try: + entries = _json.loads(member.dm_messages) + except (ValueError, TypeError): + _logger.warning("_update_invite_dms: invalid dm_messages JSON for member %s", member.id) + return + + if not entries: + return + + ws_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + blocks = [{"type": "section", "text": {"type": "mrkdwn", "text": new_text}}] + for entry in entries: + channel_id = entry.get("channel") + message_ts = entry.get("ts") + if not channel_id or message_ts is None: + continue + message_ts_str = str(message_ts).strip() + if not message_ts_str: + continue + try: + ws_client.chat_update( + channel=channel_id, + ts=message_ts_str, + text=new_text, + blocks=blocks, + ) + except Exception as e: + _logger.warning( + "_update_invite_dms: failed to update DM channel=%s ts=%s: %s", + channel_id, + message_ts_str, + e, + ) diff --git a/syncbot/handlers/messages.py b/syncbot/handlers/messages.py new file mode 100644 index 0000000..00aca22 --- /dev/null +++ b/syncbot/handlers/messages.py @@ -0,0 +1,841 @@ +"""Message sync handlers — new posts, replies, edits, deletes, reactions.""" + +import logging +import uuid +from logging import Logger + +from slack_sdk.web import WebClient + +import federation +import helpers +from db import DbManager, schemas +from handlers._common import EventContext +from logger import emit_metric +from slack import orm + + +def _find_source_workspace_id(records: list[tuple], channel_id: str, ws_index: int = 1) -> int | None: + """Return the workspace ID from the record whose channel matches *channel_id*.""" + for record in records: + 
sync_channel = record[ws_index - 1] if ws_index > 1 else record[0] + workspace = record[ws_index] + if sync_channel.channel_id == channel_id: + return workspace.id + return None + +_logger = logging.getLogger(__name__) + + +def _shared_by_file_initial_comment( + *, + user_id: str, + source_workspace_id: int, + target_workspace_id: int, + name_for_target: str, + target_client: WebClient, + channel_id: str, + text_message_ts: str | None, +) -> str: + """Build ``initial_comment`` for ``files_upload_v2`` (mention + optional permalink to text).""" + mapped_id = helpers.get_mapped_target_user_id(user_id or "", source_workspace_id or 0, target_workspace_id) + user_ref = f"<@{mapped_id}>" if mapped_id else name_for_target + if not text_message_ts: + return f"Shared by {user_ref}" + permalink = None + try: + plink_resp = target_client.chat_getPermalink(channel=channel_id, message_ts=text_message_ts) + permalink = helpers.safe_get(plink_resp, "permalink") + except Exception: + pass + if permalink: + return f"Shared by {user_ref} in <{permalink}|this message>" + return f"Shared by {user_ref}" + + +def _parse_event_fields(body: dict, client: WebClient) -> EventContext: + """Extract the common fields every message handler needs.""" + event: dict = body.get("event", {}) + msg_text: str = helpers.safe_get(event, "text") or helpers.safe_get(event, "message", "text") + msg_text = msg_text if msg_text else " " + + return EventContext( + team_id=helpers.safe_get(body, "team_id"), + channel_id=helpers.safe_get(event, "channel"), + user_id=(helpers.safe_get(event, "user") or helpers.safe_get(event, "message", "user")), + msg_text=msg_text, + mentioned_users=helpers.parse_mentioned_users(msg_text, client), + thread_ts=helpers.safe_get(event, "thread_ts"), + ts=( + helpers.safe_get(event, "message", "ts") + or helpers.safe_get(event, "previous_message", "ts") + or helpers.safe_get(event, "ts") + ), + event_subtype=helpers.safe_get(event, "subtype"), + ) + + +def _build_file_context(body: 
dict, client: WebClient, logger: Logger) -> tuple[list[dict], list[dict], list[dict]]: + """Process files attached to a message event. + + Returns ``(photo_list, photo_blocks, direct_files)`` where: + + * *photo_list* — always [] (kept for cleanup API; no S3). + * *photo_blocks* — Slack Block Kit ``image`` blocks for inline images + (e.g. GIF picker URLs), ready for ``chat.postMessage``. + * *direct_files* — files downloaded to ``/tmp`` for direct upload to + each target channel via ``files_upload_v2``. + """ + event = body.get("event", {}) + files = (helpers.safe_get(event, "files") or helpers.safe_get(event, "message", "files") or [])[:20] + event_subtype = helpers.safe_get(event, "subtype") + + images = [f for f in files if f.get("mimetype", "").startswith("image")] + videos = [f for f in files if f.get("mimetype", "").startswith("video")] + + photo_blocks: list[dict] = [] + direct_files: list[dict] = [] + + is_edit = event_subtype in ("message_changed", "message_deleted") + + if not is_edit: + direct_files = helpers.download_slack_files(images + videos, client, logger) + + # Handle GIFs/images from attachments (e.g. GIPHY bot, Slack GIF picker, + # unfurled URLs) when no file attachments are present. We always use + # image blocks for these since the URLs are publicly accessible — this + # avoids a download/re-upload round-trip and gives us a proper message + # ts for PostMeta so reactions work correctly. 
+ if not files and not is_edit: + attachments = event.get("attachments") or helpers.safe_get(event, "message", "attachments") or [] + for att in attachments: + img_url = att.get("image_url") or att.get("thumb_url") + + # Slack's built-in GIF picker nests the image inside blocks + if not img_url: + for blk in att.get("blocks") or []: + if blk.get("type") == "image" and blk.get("image_url"): + img_url = blk["image_url"] + break + + # Also check top-level event blocks for image blocks + if not img_url: + for blk in event.get("blocks") or []: + if blk.get("type") == "image" and blk.get("image_url"): + img_url = blk["image_url"] + break + + if not img_url: + _logger.info( + "attachment_no_image_url", extra={"att_keys": list(att.keys()), "fallback": att.get("fallback")} + ) + continue + + name = att.get("fallback") or "attachment.gif" + photo_blocks.append(orm.ImageBlock(image_url=img_url, alt_text=name).as_form_field()) + + return [], photo_blocks, direct_files + + +def _get_workspace_name(records: list, channel_id: str, workspace_index: int) -> str | None: + """Pull the workspace name for the originating channel from a record list.""" + return helpers.safe_get( + [r[workspace_index].workspace_name for r in records if r[workspace_index - 1].channel_id == channel_id], + 0, + ) + + +def _handle_new_post( + body: dict, + client: WebClient, + logger: Logger, + ctx: EventContext, + photo_list: list[dict], + photo_blocks: list[dict], + direct_files: list[dict] | None = None, +) -> None: + """Sync a brand-new top-level message to all linked channels.""" + team_id = ctx["team_id"] + channel_id = ctx["channel_id"] + msg_text = ctx["msg_text"] + mentioned_users = ctx["mentioned_users"] + user_id = ctx["user_id"] + + sync_records = helpers.get_sync_list(team_id, channel_id) + if not sync_records: + any_sync_channel = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + if 
any_sync_channel: + return + if user_id: + try: + client.chat_postMessage( + channel=channel_id, + text=":wave: Hello! I'm SyncBot. I was added to this Channel, but this Channel " + "doesn't seem to be part of a Sync. I'm leaving now. Please open the SyncBot Home " + "tab to configure me.", + ) + client.conversations_leave(channel=channel_id) + except Exception as e: + logger.error(f"Failed to notify and leave unconfigured channel {channel_id}: {e}") + return + + if user_id: + user_name, user_profile_url = helpers.get_user_info(client, user_id) + else: + user_name, user_profile_url = helpers.get_bot_info_from_event(body) + + workspace_name = _get_workspace_name(sync_records, channel_id, workspace_index=1) + + post_uuid = uuid.uuid4().hex + post_list: list[schemas.PostMeta] = [] + channels_synced = 0 + + source_workspace_id = _find_source_workspace_id(sync_records, channel_id) + + fed_ws = None + if sync_records: + fed_ws = helpers.get_federated_workspace_for_sync(sync_records[0][0].sync_id) + + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + + for sync_channel, workspace in sync_records: + try: + split_file_ts: str | None = None + if sync_channel.channel_id == channel_id: + ts = helpers.safe_get(body, "event", "ts") + elif fed_ws and workspace.id != source_workspace_id: + image_payloads = [] + for block in photo_blocks or []: + if block.get("type") == "image": + image_payloads.append( + { + "url": block.get("image_url", ""), + "alt_text": block.get("alt_text", "Shared image"), + } + ) + payload = federation.build_message_payload( + sync_id=sync_channel.sync_id, + post_id=post_uuid, + channel_id=sync_channel.channel_id, + user_name=user_name, + user_avatar_url=user_profile_url, + workspace_name=workspace_name, + text=fed_adapted_text, + images=image_payloads, + timestamp=helpers.safe_get(body, "event", "ts"), + ) + result = 
federation.push_message(fed_ws, payload) + ts = helpers.safe_get(result, "ts") if result else helpers.safe_get(body, "event", "ts") + if not ts: + ts = helpers.safe_get(body, "event", "ts") + else: + bot_token = helpers.decrypt_bot_token(workspace.bot_token) + target_client = WebClient(token=bot_token) + adapted_text = helpers.apply_mentioned_users( + msg_text, + client, + target_client, + mentioned_users, + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + ) + source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) + + target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message( + user_id or "", + source_workspace_id or 0, + user_name, + user_profile_url, + target_client, + workspace.id, + ) + name_for_target = target_display_name or user_name or "Someone" + + if direct_files and not msg_text.strip(): + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=None, + ) + _, file_ts = helpers.upload_files_to_slack( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + files=direct_files, + initial_comment=file_comment, + ) + ts = file_ts or helpers.safe_get(body, "event", "ts") + else: + res = helpers.post_message( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + msg_text=adapted_text, + user_name=name_for_target, + user_profile_url=target_icon_url or user_profile_url, + workspace_name=workspace_name, + blocks=photo_blocks, + ) + ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") + + if direct_files: + text_ts = str(ts) if ts else None + file_comment = 
_shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=text_ts, + ) + _, split_file_ts = helpers.upload_files_to_slack( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + files=direct_files, + thread_ts=ts, + initial_comment=file_comment, + ) + + if ts: + post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) + if split_file_ts: + post_list.append( + schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(split_file_ts)) + ) + if ts or split_file_ts: + channels_synced += 1 + except Exception as exc: + _logger.error(f"Failed to sync new post to channel {sync_channel.channel_id}: {exc}") + + synced = channels_synced + failed = len(sync_records) - synced + emit_metric("messages_synced", value=synced, sync_type="new_post") + if failed: + emit_metric("sync_failures", value=failed, sync_type="new_post") + + helpers.cleanup_temp_files(photo_list, direct_files) + + if post_list: + DbManager.create_records(post_list) + + +def _handle_thread_reply( + body: dict, + client: WebClient, + logger: Logger, + ctx: EventContext, + photo_blocks: list[dict], + direct_files: list[dict] | None = None, +) -> None: + """Sync a threaded reply to all linked channels.""" + channel_id = ctx["channel_id"] + msg_text = ctx["msg_text"] + mentioned_users = ctx["mentioned_users"] + user_id = ctx["user_id"] + thread_ts = ctx["thread_ts"] + + post_records = helpers.get_post_records(thread_ts) + if not post_records: + return + + workspace_name = _get_workspace_name(post_records, channel_id, workspace_index=2) + + if user_id: + user_name, user_profile_url = helpers.get_user_info(client, user_id) + else: + user_name, user_profile_url = helpers.get_bot_info_from_event(body) + + post_uuid = uuid.uuid4().hex + post_list: 
list[schemas.PostMeta] = [] + channels_synced = 0 + + source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2) + + fed_ws = None + if post_records: + fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id) + + thread_post_id = post_records[0][0].post_id if post_records else None + + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + + for post_meta, sync_channel, workspace in post_records: + try: + split_file_ts: str | None = None + if sync_channel.channel_id == channel_id: + ts = helpers.safe_get(body, "event", "ts") + elif fed_ws and workspace.id != source_workspace_id: + payload = federation.build_message_payload( + sync_id=sync_channel.sync_id, + post_id=post_uuid, + channel_id=sync_channel.channel_id, + user_name=user_name, + user_avatar_url=user_profile_url, + workspace_name=workspace_name, + text=fed_adapted_text, + thread_post_id=str(thread_post_id) if thread_post_id else None, + timestamp=helpers.safe_get(body, "event", "ts"), + ) + result = federation.push_message(fed_ws, payload) + ts = helpers.safe_get(result, "ts") if result else helpers.safe_get(body, "event", "ts") + if not ts: + ts = helpers.safe_get(body, "event", "ts") + else: + bot_token = helpers.decrypt_bot_token(workspace.bot_token) + target_client = WebClient(token=bot_token) + adapted_text = helpers.apply_mentioned_users( + msg_text, + client, + target_client, + mentioned_users, + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + ) + source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) + parent_ts = f"{post_meta.ts:.6f}" + + target_display_name, target_icon_url = 
helpers.get_display_name_and_icon_for_synced_message( + user_id or "", + source_workspace_id or 0, + user_name, + user_profile_url, + target_client, + workspace.id, + ) + name_for_target = target_display_name or user_name or "Someone" + + if direct_files and not msg_text.strip(): + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=None, + ) + _, file_ts = helpers.upload_files_to_slack( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + files=direct_files, + initial_comment=file_comment, + thread_ts=parent_ts, + ) + ts = file_ts or helpers.safe_get(body, "event", "ts") + else: + res = helpers.post_message( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + msg_text=adapted_text, + user_name=name_for_target, + user_profile_url=target_icon_url or user_profile_url, + thread_ts=parent_ts, + workspace_name=workspace_name, + blocks=photo_blocks, + ) + ts = helpers.safe_get(res, "ts") + + if direct_files: + text_ts = str(ts) if ts else None + file_comment = _shared_by_file_initial_comment( + user_id=user_id or "", + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + name_for_target=name_for_target, + target_client=target_client, + channel_id=sync_channel.channel_id, + text_message_ts=text_ts, + ) + _, split_file_ts = helpers.upload_files_to_slack( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + files=direct_files, + thread_ts=parent_ts, + initial_comment=file_comment, + ) + + if ts: + post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) + if split_file_ts: + post_list.append( + schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(split_file_ts)) + ) + if ts or split_file_ts: + channels_synced += 1 + except 
Exception as exc: + _logger.error(f"Failed to sync thread reply to channel {sync_channel.channel_id}: {exc}") + + synced = channels_synced + failed = len(post_records) - synced + emit_metric("messages_synced", value=synced, sync_type="thread_reply") + if failed: + emit_metric("sync_failures", value=failed, sync_type="thread_reply") + + helpers.cleanup_temp_files(None, direct_files) + + if post_list: + DbManager.create_records(post_list) + + +def _handle_message_edit( + client: WebClient, + logger: Logger, + ctx: EventContext, + photo_blocks: list[dict], +) -> None: + """Propagate an edited message to all linked channels.""" + channel_id = ctx["channel_id"] + msg_text = ctx["msg_text"] + mentioned_users = ctx["mentioned_users"] + ts = ctx["ts"] + + post_records = helpers.get_post_records(ts) + if not post_records: + return + + workspace_name = _get_workspace_name(post_records, channel_id, workspace_index=2) + + source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2) + + fed_ws = None + if post_records: + fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id) + + source_ws_fed = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + fed_adapted_text = helpers.resolve_channel_references(msg_text, client, source_ws_fed) + + synced = 0 + failed = 0 + for post_meta, sync_channel, workspace in post_records: + if sync_channel.channel_id == channel_id: + continue + try: + if fed_ws and workspace.id != source_workspace_id: + payload = federation.build_edit_payload( + post_id=post_meta.post_id.hex() if isinstance(post_meta.post_id, bytes) else str(post_meta.post_id), + channel_id=sync_channel.channel_id, + text=fed_adapted_text, + timestamp=f"{post_meta.ts:.6f}", + ) + federation.push_edit(fed_ws, payload) + else: + bot_token = helpers.decrypt_bot_token(workspace.bot_token) + target_client = WebClient(token=bot_token) + adapted_text = helpers.apply_mentioned_users( + msg_text, + client, + 
target_client, + mentioned_users, + source_workspace_id=source_workspace_id or 0, + target_workspace_id=workspace.id, + ) + source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + adapted_text = helpers.resolve_channel_references( + adapted_text, client, source_ws, target_workspace_id=workspace.id + ) + helpers.post_message( + bot_token=bot_token, + channel_id=sync_channel.channel_id, + msg_text=adapted_text, + update_ts=f"{post_meta.ts:.6f}", + workspace_name=workspace_name, + blocks=photo_blocks, + ) + synced += 1 + except Exception as exc: + failed += 1 + _logger.error(f"Failed to sync message edit to channel {sync_channel.channel_id}: {exc}") + + emit_metric("messages_synced", value=synced, sync_type="message_edit") + if failed: + emit_metric("sync_failures", value=failed, sync_type="message_edit") + + +def _handle_message_delete( + ctx: EventContext, + logger: Logger, +) -> None: + """Propagate a deleted message to all linked channels.""" + channel_id = ctx["channel_id"] + ts = ctx["ts"] + + post_records = helpers.get_post_records(ts) + if not post_records: + return + + fed_ws = None + if post_records: + fed_ws = helpers.get_federated_workspace_for_sync(post_records[0][1].sync_id) + + source_workspace_id = _find_source_workspace_id(post_records, channel_id, ws_index=2) + + synced = 0 + failed = 0 + for post_meta, sync_channel, workspace in post_records: + if sync_channel.channel_id == channel_id: + continue + try: + if fed_ws and workspace.id != source_workspace_id: + payload = federation.build_delete_payload( + post_id=post_meta.post_id.hex() if isinstance(post_meta.post_id, bytes) else str(post_meta.post_id), + channel_id=sync_channel.channel_id, + timestamp=f"{post_meta.ts:.6f}", + ) + federation.push_delete(fed_ws, payload) + else: + helpers.delete_message( + bot_token=helpers.decrypt_bot_token(workspace.bot_token), + channel_id=sync_channel.channel_id, + ts=f"{post_meta.ts:.6f}", + ) + synced += 1 + except Exception 
as exc: + failed += 1 + _logger.error(f"Failed to sync message delete to channel {sync_channel.channel_id}: {exc}") + + emit_metric("messages_synced", value=synced, sync_type="message_delete") + if failed: + emit_metric("sync_failures", value=failed, sync_type="message_delete") + + +def _handle_reaction( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Sync a reaction to all linked channels as a threaded message. + + Posts a short message (e.g. "reacted with :thumbsup: to ") using + the same Display Name (Workspace Name) impersonation used for synced + messages. The message is always threaded under the top-level synced + message, with a permalink to the exact message that was reacted to. + Only ``reaction_added`` events are synced. + """ + event = body.get("event", {}) + reaction = event.get("reaction") + user_id = event.get("user") + item = event.get("item", {}) + item_type = item.get("type") + channel_id = item.get("channel") + msg_ts = item.get("ts") + event_type = event.get("type") + + if event_type != "reaction_added": + return + + if not reaction or not channel_id or not msg_ts or item_type != "message": + return + + own_user_id = helpers.get_own_bot_user_id(client) + if own_user_id and user_id == own_user_id: + return + + reacted_records = helpers.get_post_records(msg_ts) + if not reacted_records: + _logger.info( + "reaction_no_post_meta", + extra={"msg_ts": msg_ts, "channel_id": channel_id, "float_ts": float(msg_ts)}, + ) + return + + fed_ws = helpers.get_federated_workspace_for_sync(reacted_records[0][1].sync_id) + + source_workspace_id = _find_source_workspace_id(reacted_records, channel_id, ws_index=2) + + user_name, user_profile_url = helpers.get_user_info(client, user_id) if user_id else (None, None) + source_ws = helpers.get_workspace_by_id(source_workspace_id) if source_workspace_id else None + ws_name = helpers.resolve_workspace_name(source_ws) if source_ws else None + posted_from = f"({ws_name})" if ws_name 
else "(via SyncBot)" + + post_uuid = uuid.uuid4().hex + post_list: list[schemas.PostMeta] = [] + + synced = 0 + failed = 0 + for post_meta, sync_channel, workspace in reacted_records: + try: + if fed_ws and workspace.id != source_workspace_id: + payload = federation.build_reaction_payload( + post_id=str(post_meta.post_id), + channel_id=sync_channel.channel_id, + reaction=reaction, + action="add", + user_name=user_name or user_id or "Someone", + user_avatar_url=user_profile_url, + workspace_name=ws_name, + timestamp=f"{post_meta.ts:.6f}", + ) + federation.push_reaction(fed_ws, payload) + else: + target_client = WebClient(token=helpers.decrypt_bot_token(workspace.bot_token)) + target_msg_ts = f"{post_meta.ts:.6f}" + + target_display_name, target_icon_url = helpers.get_display_name_and_icon_for_synced_message( + user_id or "", + source_workspace_id or 0, + user_name, + user_profile_url, + target_client, + workspace.id, + ) + display_name = target_display_name or user_name or user_id or "Someone" + + permalink = None + try: + plink_resp = target_client.chat_getPermalink( + channel=sync_channel.channel_id, + message_ts=target_msg_ts, + ) + permalink = helpers.safe_get(plink_resp, "permalink") + except Exception as exc: + # Permalink lookup is optional; if it fails we still post a + # reaction notice without the deep-link. 
+ _logger.debug( + "reaction_permalink_lookup_failed", + extra={"channel_id": sync_channel.channel_id, "message_ts": target_msg_ts, "error": str(exc)}, + ) + + if permalink: + msg_text = f"reacted with :{reaction}: to <{permalink}|this message>" + else: + msg_text = f"reacted with :{reaction}:" + + resp = target_client.chat_postMessage( + channel=sync_channel.channel_id, + text=msg_text, + username=f"{display_name} {posted_from}", + icon_url=target_icon_url or user_profile_url, + thread_ts=target_msg_ts, + unfurl_links=False, + unfurl_media=False, + ) + ts = helpers.safe_get(resp, "ts") + if ts: + post_list.append(schemas.PostMeta(post_id=post_uuid, sync_channel_id=sync_channel.id, ts=float(ts))) + synced += 1 + except Exception as exc: + failed += 1 + _logger.error(f"Failed to sync reaction to channel {sync_channel.channel_id}: {exc}") + + if post_list: + DbManager.create_records(post_list) + + emit_metric("messages_synced", value=synced, sync_type="reaction_add") + if failed: + emit_metric("sync_failures", value=failed, sync_type="reaction_add") + + +def _is_own_bot_message(body: dict, client: WebClient, context: dict) -> bool: + """Return *True* if the event was generated by SyncBot itself. + + Compares the ``bot_id`` in the event payload against SyncBot's own + bot ID. This replaces the old blanket ``bot_message`` filter so + that messages from *other* bots are synced normally while SyncBot's + own re-posts are still ignored (preventing infinite loops). 
+ """ + event = body.get("event", {}) + event_bot_id = ( + event.get("bot_id") + or helpers.safe_get(event, "message", "bot_id") + or helpers.safe_get(event, "previous_message", "bot_id") + ) + if not event_bot_id: + return False + + own_bot_id = helpers.get_own_bot_id(client, context) + return event_bot_id == own_bot_id + + +def _should_skip_slack_event_retry(body: dict, context: dict) -> bool: + """Return True if Slack delivered this event as a retry (duplicate work).""" + rn = context.get("slack_retry_num") + if rn is not None: + try: + if int(rn) >= 1: + return True + except (TypeError, ValueError): + pass + ra = helpers.safe_get(body, "retry_attempt") + if ra is not None: + try: + if int(ra) >= 1: + return True + except (TypeError, ValueError): + pass + return False + + +def respond_to_message_event( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Dispatch incoming message events to the appropriate sub-handler.""" + ctx = _parse_event_fields(body, client) + event_type = helpers.safe_get(body, "event", "type") + event_subtype = ctx["event_subtype"] + + if event_type != "message": + return + + # Skip messages from SyncBot itself to prevent infinite sync loops. + # Messages from OTHER bots are synced normally. + if _is_own_bot_message(body, client, context): + return + + if _should_skip_slack_event_retry(body, context): + _logger.info( + "skipping_slack_event_retry", + extra={ + "slack_retry_num": context.get("slack_retry_num"), + "retry_attempt": helpers.safe_get(body, "retry_attempt"), + }, + ) + return + + # Slack sends a plain message event and then a file_share for the same post; process only file_share + # so we do not sync twice (and avoid downloading files twice). 
+ event_has_files = bool( + helpers.safe_get(body, "event", "files") or helpers.safe_get(body, "event", "message", "files") + ) + if not event_subtype and event_has_files: + _logger.debug( + "skip_message_pending_file_share", + extra={"channel": helpers.safe_get(body, "event", "channel")}, + ) + return + + photo_list, photo_blocks, direct_files = _build_file_context(body, client, logger) + + has_files = bool(photo_blocks or direct_files) + if ( + (not event_subtype and not event_has_files) + or event_subtype == "bot_message" + or (event_subtype == "file_share" and (ctx["msg_text"] != "" or has_files)) + ): + if not ctx["thread_ts"]: + _handle_new_post(body, client, logger, ctx, photo_list, photo_blocks, direct_files) + else: + _handle_thread_reply(body, client, logger, ctx, photo_blocks, direct_files) + elif event_subtype == "message_changed": + _handle_message_edit(client, logger, ctx, photo_blocks) + elif event_subtype == "message_deleted": + _handle_message_delete(ctx, logger) diff --git a/syncbot/handlers/sync.py b/syncbot/handlers/sync.py new file mode 100644 index 0000000..4f8878a --- /dev/null +++ b/syncbot/handlers/sync.py @@ -0,0 +1,570 @@ +"""Sync management handlers — create, join, remove syncs and Home tab.""" + +import logging +import time +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import helpers +from db import DbManager, schemas +from handlers._common import _sanitize_text +from slack import actions, forms, orm + +_logger = logging.getLogger(__name__) + + +def handle_remove_sync( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +): + """Handles the "DeSync" button action by removing the SyncChannel record from the database. + + Requires admin/owner authorization (defense-in-depth). 
+ """ + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "remove_sync"}) + return + + raw_value = helpers.safe_get(body, "actions", 0, "value") + try: + sync_channel_id = int(raw_value) + except (TypeError, ValueError): + _logger.warning(f"Invalid sync_channel_id value: {raw_value!r}") + return + + sync_channel_record = DbManager.get_record(schemas.SyncChannel, id=sync_channel_id) + if not sync_channel_record: + return + + team_id = helpers.safe_get(body, "team_id") + workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id) if team_id else None + if not workspace_record or sync_channel_record.workspace_id != workspace_record.id: + _logger.warning( + "ownership_denied", + extra={"sync_channel_id": sync_channel_id, "team_id": team_id, "action": "remove_sync"}, + ) + return + + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == sync_channel_id], + {schemas.SyncChannel.deleted_at: datetime.now(UTC)}, + ) + try: + client.conversations_leave(channel=sync_channel_record.channel_id) + except Exception as e: + logger.warning(f"Failed to leave channel {sync_channel_record.channel_id}: {e}") + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + other_sync_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_channel_record.sync_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.workspace_id != workspace_record.id, + ], + ) + for sync_channel in other_sync_channels: + member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id, context=context) + if member_ws: + builders.refresh_home_tab_for_workspace(member_ws, logger, context=None) + + +def handle_app_home_opened( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handle the ``app_home_opened`` 
event by publishing the Home tab.""" + helpers.purge_stale_soft_deletes() + builders.build_home_tab(body, client, logger, context) + + +def handle_refresh_home( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handle the Refresh button on the Home tab. + + Uses content hash and cached blocks: full refresh only when data changed. + When hash matches and within 60s cooldown, re-publishes with cooldown message. + """ + team_id = helpers.safe_get(body, "view", "team_id") or helpers.safe_get(body, "team", "id") + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not team_id or not user_id: + return + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + return + + current_hash = builders._home_tab_content_hash(workspace_record) + hash_key = f"home_tab_hash:{team_id}" + blocks_key = f"home_tab_blocks:{team_id}:{user_id}" + refresh_at_key = f"refresh_at:home:{team_id}:{user_id}" + + action, cached_blocks, remaining = helpers.refresh_cooldown_check( + current_hash, hash_key, blocks_key, refresh_at_key + ) + cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) + + if action == "cooldown" and cached_blocks is not None and remaining is not None: + refresh_idx = helpers.index_of_block_with_action( + cached_blocks, actions.CONFIG_REFRESH_HOME + ) + blocks_with_message = helpers.inject_cooldown_message( + cached_blocks, refresh_idx, remaining + ) + client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message}) + return + if action == "cached" and cached_blocks is not None: + client.views_publish(user_id=user_id, view={"type": "home", "blocks": cached_blocks}) + helpers._cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) + return + + # Full refresh: clear workspace name caches and refresh all workspace names + stale_keys = [k for k in helpers._CACHE if k.startswith("ws_name_refresh:")] + 
for k in stale_keys: + helpers._CACHE.pop(k, None) + + all_workspaces = DbManager.find_records( + schemas.Workspace, + [schemas.Workspace.deleted_at.is_(None)], + ) + for ws in all_workspaces: + try: + if ws.id == workspace_record.id: + ws_client = client + elif ws.bot_token: + ws_client = WebClient(token=helpers.decrypt_bot_token(ws.bot_token)) + else: + continue + + info = ws_client.team_info() + current_name = info["team"]["name"] + if current_name and current_name != ws.workspace_name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == ws.id], + {schemas.Workspace.workspace_name: current_name}, + ) + _logger.info( + "workspace_name_refreshed", + extra={"workspace_id": ws.id, "new_name": current_name}, + ) + except Exception as e: + ws_label = f"{ws.workspace_name} ({ws.team_id})" + _logger.warning(f"Failed to refresh name for {ws_label}: {e}") + + block_dicts = builders.build_home_tab(body, client, logger, context, user_id=user_id, return_blocks=True) + if block_dicts is None: + return + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + helpers.refresh_after_full(hash_key, blocks_key, refresh_at_key, current_hash, block_dicts) + + +def handle_join_sync_submission( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handles the join sync form submission by appending to the SyncChannel table. + + Requires admin/owner authorization (defense-in-depth). + The bot joins the channel *before* the DB record is created so that + a failed join doesn't leave an orphaned record. 
+
+    """
+    user_id = helpers.get_user_id_from_body(body)
+    if not user_id or not helpers.is_user_authorized(client, user_id):
+        _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "join_sync"})
+        return
+
+    form_data = forms.JOIN_SYNC_FORM.get_selected_values(body)
+    sync_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_SELECT)
+    channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT)
+    team_id = helpers.safe_get(body, "view", "team_id")
+
+    if not sync_id or not channel_id or not team_id:
+        logger.warning(f"Rejected join-sync: missing required field (sync_id={sync_id}, channel_id={channel_id})")
+        return
+
+    workspace_record: schemas.Workspace = DbManager.get_record(schemas.Workspace, team_id=team_id)
+    sync_record: schemas.Sync = DbManager.get_record(schemas.Sync, id=sync_id)
+
+    if not workspace_record or not sync_record:
+        logger.warning("Rejected join-sync: workspace or sync record not found")
+        return
+
+    existing_join = DbManager.find_records(
+        schemas.SyncChannel,
+        [
+            schemas.SyncChannel.sync_id == sync_id,
+            schemas.SyncChannel.workspace_id == workspace_record.id,
+            schemas.SyncChannel.channel_id == channel_id,
+            schemas.SyncChannel.deleted_at.is_(None),
+            schemas.SyncChannel.status == "active",
+        ],
+    )
+    if existing_join:
+        _logger.info(
+            "join_sync_duplicate_skip",
+            extra={
+                "sync_id": sync_id,
+                "channel_id": channel_id,
+                "workspace_id": workspace_record.id,
+            },
+        )
+        builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+        return
+
+    acting_user_id = helpers.safe_get(body, "user", "id") or user_id
+    admin_name, admin_label = helpers.format_admin_label(client, acting_user_id, workspace_record)
+
+    other_sync_channels: list = []
+    try:
+        client.conversations_join(channel=channel_id)
+        channel_sync_record = schemas.SyncChannel(
+            sync_id=sync_id,
+            channel_id=channel_id,
+            workspace_id=workspace_record.id,
+            created_at=datetime.now(UTC),
+        )
+        DbManager.create_record(channel_sync_record)
+        other_sync_channels = DbManager.find_records(
+            schemas.SyncChannel,
+            [
+                schemas.SyncChannel.sync_id == sync_id,
+                schemas.SyncChannel.deleted_at.is_(None),
+                schemas.SyncChannel.workspace_id != workspace_record.id,
+            ],
+        )
+        if other_sync_channels:
+            first_channel = other_sync_channels[0]
+            first_ws = helpers.get_workspace_by_id(first_channel.workspace_id)
+            channel_ref = helpers.resolve_channel_name(first_channel.channel_id, first_ws)
+        else:
+            channel_ref = sync_record.title or "the other Channel"
+        client.chat_postMessage(
+            channel=channel_id,
+            text=f":arrows_counterclockwise: *{admin_name}* started syncing this Channel with *{channel_ref}*. Messages will be shared automatically.",
+        )
+
+        local_ref = helpers.resolve_channel_name(channel_id, workspace_record)
+        for sync_channel in other_sync_channels:
+            try:
+                member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id)
+                if member_ws and member_ws.bot_token:
+                    member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token))
+                    member_client.chat_postMessage(
+                        channel=sync_channel.channel_id,
+                        text=f":arrows_counterclockwise: *{admin_label}* started syncing *{local_ref}* with this Channel. Messages will be shared automatically.",
+                    )
+            except Exception as exc:
+                _logger.debug(f"join_sync: failed to notify channel {sync_channel.channel_id}: {exc}")
+    except Exception as e:
+        logger.error(f"Failed to join sync channel {channel_id}: {e}")
+
+    builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context)
+
+    for sync_channel in other_sync_channels:
+        member_ws = helpers.get_workspace_by_id(sync_channel.workspace_id, context=context)
+        if member_ws:
+            builders.refresh_home_tab_for_workspace(member_ws, logger, context=None)
+
+
+def handle_new_sync_submission(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handles the new sync form submission.
+ + Creates a Sync named after the selected channel, links the channel + to the sync, joins the channel, and posts a welcome message. + Requires admin/owner authorization (defense-in-depth). + """ + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + _logger.warning("authorization_denied", extra={"user_id": user_id, "action": "new_sync"}) + return + + form_data = forms.NEW_SYNC_FORM.get_selected_values(body) + channel_id = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_CHANNEL_SELECT) + team_id = helpers.safe_get(body, "view", "team_id") + + if not channel_id or not team_id: + logger.warning(f"Rejected sync creation: missing field (channel_id={channel_id})") + return + + workspace_record = helpers.get_workspace_record(team_id, body, context, client) + if not workspace_record: + logger.warning("Rejected sync creation: workspace record not found") + return + + try: + conv_info = client.conversations_info(channel=channel_id) + channel_name = helpers.safe_get(conv_info, "channel", "name") or channel_id + except Exception as exc: + _logger.debug(f"handle_create_sync: conversations_info failed for {channel_id}: {exc}") + channel_name = channel_id + + sync_title = _sanitize_text(channel_name) + if not sync_title: + logger.warning("Rejected sync creation: could not determine channel name") + return + + acting_user_id = helpers.safe_get(body, "user", "id") or user_id + admin_name, _ = helpers.format_admin_label(client, acting_user_id, workspace_record) + + try: + client.conversations_join(channel=channel_id) + sync_record = schemas.Sync(title=sync_title, description=None) + DbManager.create_record(sync_record) + channel_sync_record = schemas.SyncChannel( + sync_id=sync_record.id, + channel_id=channel_id, + workspace_id=workspace_record.id, + created_at=datetime.now(UTC), + ) + DbManager.create_record(channel_sync_record) + client.chat_postMessage( + channel=channel_id, + text=f":outbox_tray: 
*{admin_name}* published this Channel for Syncing. Other Workspaces can now subscribe.", + ) + except Exception as e: + logger.error(f"Failed to create sync for channel {channel_id}: {e}") + + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) + + +def handle_member_joined_channel( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handle member_joined_channel: check if SyncBot was added to an untracked channel.""" + event = body.get("event", {}) + user_id = event.get("user") + channel_id = event.get("channel") + team_id = helpers.safe_get(body, "team_id") or event.get("team") + + if not user_id or not channel_id or not team_id: + return + + own_user_id = helpers.get_own_bot_user_id(client) + if user_id != own_user_id: + return + + any_sync_channel = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + if any_sync_channel: + return + + try: + client.chat_postMessage( + channel=channel_id, + text=":wave: Hello! I'm SyncBot. I was added to this Channel, but this Channel " + "doesn't seem to be part of a Sync. I'm leaving now. 
Please open the SyncBot Home " + "tab to configure me.", + ) + client.conversations_leave(channel=channel_id) + except Exception as e: + _logger.warning(f"Failed to notify and leave untracked channel {channel_id}: {e}") + + +def check_join_sync_channel( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Checks to see if the chosen channel id is already part of a sync.""" + view_id = helpers.safe_get(body, "view", "id") + form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) + channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) + blocks = helpers.safe_get(body, "view", "blocks") + already_warning = constants.WARNING_BLOCK in [block["block_id"] for block in blocks] + sync_channel_records = DbManager.find_records( + schemas.SyncChannel, + [schemas.SyncChannel.channel_id == channel_id, schemas.SyncChannel.deleted_at.is_(None)], + ) + + if len(sync_channel_records) > 0 and not already_warning: + blocks.append( + orm.SectionBlock( + action=constants.WARNING_BLOCK, + label=":warning: :warning: This Channel is already part of a Sync! 
Please choose another Channel.", + ).as_form_field() + ) + helpers.update_modal( + blocks=blocks, + client=client, + view_id=view_id, + title_text="Join Sync", + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, + ) + elif len(sync_channel_records) == 0 and already_warning: + blocks = [block for block in blocks if block["block_id"] != constants.WARNING_BLOCK] + helpers.update_modal( + blocks=blocks, + client=client, + view_id=view_id, + title_text="Join Sync", + callback_id=actions.CONFIG_JOIN_SYNC_SUBMIT, + ) + + +# --------------------------------------------------------------------------- +# Database Reset (gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET) +# --------------------------------------------------------------------------- + + +def handle_db_reset( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Open a confirmation modal warning the user before a full DB reset. + + Only when PRIMARY_WORKSPACE matches and ENABLE_DB_RESET is truthy (see helpers.core). + """ + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + if not helpers.is_db_reset_visible_for_workspace(team_id): + return + + user_id = helpers.safe_get(body, "user", "id") or helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + trigger_id = helpers.safe_get(body, "trigger_id") + if not trigger_id: + return + + client.views_open( + trigger_id=trigger_id, + view={ + "type": "modal", + "title": {"type": "plain_text", "text": "Yikes! 
Reset Database?"}, + "close": {"type": "plain_text", "text": "Cancel"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ( + ":rotating_light: *This Will Permanently Delete ALL Data!* :rotating_light:\n\n" + "Every Slack Install, Workspace Group, Channel Sync, and User Mapping, " + "in this database will be erased and the schema will be reinitialized.\n\n" + "*NOTE:* _All Slack Workspaces will need to reinstall the SyncBot app to get started again._\n\n" + "*This action cannot be undone! MAKE A BACKUP FIRST!*" + ), + }, + }, + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": "Confirm, Erase Everything!"}, + "style": "danger", + "action_id": actions.CONFIG_DB_RESET_PROCEED, + }, + ], + }, + ], + }, + ) + + +def handle_db_reset_proceed( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Execute the database reset after user confirmed via modal. + + Same gating as handle_db_reset (PRIMARY_WORKSPACE + ENABLE_DB_RESET). + """ + team_id = helpers.safe_get(body, "team", "id") or helpers.safe_get(body, "view", "team_id") + if not helpers.is_db_reset_visible_for_workspace(team_id): + return + + user_id = helpers.get_user_id_from_body(body) + if not user_id or not helpers.is_user_authorized(client, user_id): + return + + # Update the modal to a "done" state so the user can close it (Slack only allows + # closing modals via view_submission, not block_actions, so we replace the view). 
+ view_id = helpers.safe_get(body, "view", "id") + if view_id: + try: + client.views_update( + view_id=view_id, + view={ + "type": "modal", + "title": {"type": "plain_text", "text": "Reset Complete"}, + "close": {"type": "plain_text", "text": "Close"}, + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":skull_and_crossbones: You can close this now.", + }, + }, + ], + }, + ) + except Exception as e: + _logger.warning("Failed to update modal after DB reset: %s", e) + + _logger.critical( + "DB_RESET triggered by user %s — dropping database and reinitializing via Alembic", + user_id, + ) + + from db import drop_and_init_db + + drop_and_init_db() + + helpers.clear_all_caches() + + if team_id and user_id: + try: + client.views_publish( + user_id=user_id, + view={ + "type": "home", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Database Has Been Reset!*\nPlease reinstall SyncBot in your Workspace.", + }, + } + ], + }, + ) + except Exception as e: + _logger.warning("Failed to publish post-reset Home tab: %s", e) diff --git a/syncbot/handlers/tokens.py b/syncbot/handlers/tokens.py new file mode 100644 index 0000000..7d80688 --- /dev/null +++ b/syncbot/handlers/tokens.py @@ -0,0 +1,136 @@ +"""Token revocation handler.""" + +import logging +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import constants +import helpers +from db import DbManager, schemas + +_logger = logging.getLogger(__name__) + + +def handle_tokens_revoked( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Handle ``tokens_revoked`` event: a workspace uninstalled the app. + + Soft-deletes the workspace, its group memberships, and its sync channels. + Notifies other group member workspaces via admin DMs and channel messages. 
+ """ + team_id = helpers.safe_get(body, "team_id") + if not team_id: + _logger.warning("handle_tokens_revoked: missing team_id") + return + + workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id) + if not workspace_record: + _logger.warning("handle_tokens_revoked: unknown workspace", extra={"team_id": team_id}) + return + + now = datetime.now(UTC) + ws_name = helpers.resolve_workspace_name(workspace_record) + retention_days = constants.SOFT_DELETE_RETENTION_DAYS + + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + {schemas.Workspace.deleted_at: now}, + ) + + active_memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + for membership in active_memberships: + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == membership.id], + {schemas.WorkspaceGroupMember.deleted_at: now}, + ) + + my_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for sync_channel in my_channels: + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == sync_channel.id], + {schemas.SyncChannel.deleted_at: now, schemas.SyncChannel.status: "paused"}, + ) + + notified_ws: set[int] = set() + for membership in active_memberships: + group_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == membership.group_id, + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for member in group_members: + if not member.workspace_id or member.workspace_id in 
notified_ws: + continue + member_ws = helpers.get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at: + continue + notified_ws.add(member.workspace_id) + + try: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + + helpers.notify_admins_dm( + member_client, + f":double_vertical_bar: *{ws_name}* has uninstalled SyncBot. " + f"Syncing has been paused. If they reinstall within {retention_days} days, " + "Syncing will resume automatically.", + ) + + member_channel_ids = [] + for sync_channel in my_channels: + sibling_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync_channel.sync_id, + schemas.SyncChannel.workspace_id == member.workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for sibling in sibling_channels: + member_channel_ids.append(sibling.channel_id) + + if member_channel_ids: + helpers.notify_synced_channels( + member_client, + member_channel_ids, + f":double_vertical_bar: Syncing with *{ws_name}* has been paused because they uninstalled the app.", + ) + except Exception as e: + _logger.warning(f"handle_tokens_revoked: failed to notify member {member.workspace_id}: {e}") + + _logger.info( + "workspace_soft_deleted", + extra={ + "workspace_id": workspace_record.id, + "team_id": team_id, + "memberships_paused": len(active_memberships), + "channels_paused": len(my_channels), + }, + ) diff --git a/syncbot/handlers/users.py b/syncbot/handlers/users.py new file mode 100644 index 0000000..6e1c74f --- /dev/null +++ b/syncbot/handlers/users.py @@ -0,0 +1,291 @@ +"""User event handlers — team join, profile changes, user mapping management.""" + +import contextlib +import logging +import time +from datetime import UTC, datetime +from logging import Logger + +from slack_sdk.web import WebClient + +import builders +import constants +import helpers +from builders._common import _get_group_members, _get_groups_for_workspace 
+from db import DbManager, schemas
+from handlers._common import _get_authorized_workspace
+
+_logger = logging.getLogger(__name__)
+
+
+def handle_team_join(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handle a team_join event: a new user joined a connected workspace.
+
+    1. Upsert the new user into ``user_directory`` for this workspace.
+    2. Re-check all ``match_method='none'`` mappings targeting this workspace.
+    """
+    event = body.get("event", {})
+    user_data = event.get("user", {})
+    team_id = helpers.safe_get(body, "team_id")
+
+    if not user_data or not team_id:
+        return
+
+    if user_data.get("is_bot") or user_data.get("id") == "USLACKBOT":
+        return
+
+    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id)
+    if not workspace_record:
+        _logger.warning(f"team_join: unknown team_id {team_id}")
+        return
+
+    _logger.info(
+        "team_join_received",
+        extra={"team_id": team_id, "user_id": user_data.get("id")},
+    )
+
+    helpers._upsert_single_user_to_directory(user_data, workspace_record.id)
+
+    newly_matched, still_unmatched = helpers.run_auto_match_for_workspace(client, workspace_record.id)
+    _logger.info(
+        "team_join_matching_complete",
+        extra={
+            "workspace_id": workspace_record.id,
+            "newly_matched": newly_matched,
+            "still_unmatched": still_unmatched,
+        },
+    )
+
+
+def handle_user_profile_changed(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Handle a user_profile_changed event: update directory and notify group members."""
+    event = body.get("event", {})
+    user_data = event.get("user", {})
+    team_id = helpers.safe_get(body, "team_id")
+
+    if not user_data or not team_id:
+        return
+
+    if user_data.get("is_bot") or user_data.get("id") == "USLACKBOT":
+        return
+
+    workspace_record = DbManager.get_record(schemas.Workspace, team_id=team_id)
+    if not workspace_record:
+        return
+
+    helpers._upsert_single_user_to_directory(user_data, workspace_record.id)
+
+    my_groups = _get_groups_for_workspace(workspace_record.id)
+    notified_ws: set[int] = set()
+    for group, _ in my_groups:
+        members = _get_group_members(group.id)
+        for member in members:
+            if (
+                member.workspace_id
+                and member.workspace_id != workspace_record.id
+                and member.workspace_id not in notified_ws
+            ):
+                member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context)
+                if member_ws:
+                    builders.refresh_home_tab_for_workspace(member_ws, logger, context=None)
+                notified_ws.add(member.workspace_id)
+
+    _logger.info(
+        "user_profile_updated",
+        extra={"team_id": team_id, "user_id": user_data.get("id")},
+    )
+
+
+def handle_user_mapping_back(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Return from the user mapping screen to the main Home tab."""
+    user_id = helpers.get_user_id_from_body(body)
+    if not user_id:
+        return
+    builders.build_home_tab(body, client, logger, context, user_id=user_id)
+
+
+def handle_user_mapping_refresh(
+    body: dict,
+    client: WebClient,
+    logger: Logger,
+    context: dict,
+) -> None:
+    """Refresh user mappings: re-seed, auto-match, then re-render the mapping screen.
+
+    Uses content hash and cached blocks; when hash unchanged and within 60s cooldown,
+    re-publishes with cooldown message.
+ """ + auth_result = _get_authorized_workspace(body, client, context, "user_mapping_refresh") + if not auth_result: + return + user_id, workspace_record = auth_result + + raw_group = helpers.safe_get(body, "actions", 0, "value") or "0" + try: + group_id = int(raw_group) + except (TypeError, ValueError): + group_id = 0 + + gid_opt = group_id or None + current_hash = builders._user_mapping_content_hash(workspace_record, gid_opt) + hash_key = f"user_mapping_hash:{workspace_record.team_id}:{user_id}:{group_id}" + blocks_key = f"user_mapping_blocks:{workspace_record.team_id}:{user_id}:{group_id}" + refresh_at_key = f"refresh_at:user_mapping:{workspace_record.team_id}:{user_id}:{group_id}" + + action, cached_blocks, remaining = helpers.refresh_cooldown_check( + current_hash, hash_key, blocks_key, refresh_at_key + ) + cooldown_sec = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) + + if action == "cooldown" and cached_blocks is not None and remaining is not None: + blocks_with_message = helpers.inject_cooldown_message( + cached_blocks, builders._USER_MAPPING_REFRESH_BUTTON_INDEX, remaining + ) + client.views_publish(user_id=user_id, view={"type": "home", "blocks": blocks_with_message}) + return + if action == "cached" and cached_blocks is not None: + client.views_publish(user_id=user_id, view={"type": "home", "blocks": cached_blocks}) + helpers._cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) + return + + helpers._CACHE.pop(f"dir_refresh:{workspace_record.id}", None) + + if group_id: + members = _get_group_members(group_id) + else: + members = [] + for group, _ in _get_groups_for_workspace(workspace_record.id): + members.extend(_get_group_members(group.id)) + + member_clients: list[tuple[WebClient, int]] = [] + + for member in members: + if not member.workspace_id or member.workspace_id == workspace_record.id: + continue + try: + # Force a fresh directory pull before rematching. 
Cached directory + # snapshots can keep stale display names/emails after profile edits. + helpers._CACHE.pop(f"dir_refresh:{member.workspace_id}", None) + member_ws = helpers.get_workspace_by_id(member.workspace_id, context=context) + if member_ws and member_ws.bot_token: + member_client = WebClient(token=helpers.decrypt_bot_token(member_ws.bot_token)) + helpers._refresh_user_directory(member_client, member.workspace_id) + member_clients.append((member_client, member.workspace_id)) + helpers.seed_user_mappings(member.workspace_id, workspace_record.id, group_id=gid_opt) + helpers.seed_user_mappings(workspace_record.id, member.workspace_id, group_id=gid_opt) + except Exception as exc: + _logger.warning( + "user_mapping_refresh_member_sync_failed", + extra={ + "workspace_id": workspace_record.id, + "member_workspace_id": member.workspace_id, + "group_id": gid_opt, + "error": str(exc), + }, + ) + + helpers.run_auto_match_for_workspace(client, workspace_record.id) + for member_client, member_ws_id in member_clients: + with contextlib.suppress(Exception): + helpers.run_auto_match_for_workspace(member_client, member_ws_id) + + block_dicts = builders.build_user_mapping_screen( + client, + workspace_record, + user_id, + group_id=gid_opt, + context=context, + return_blocks=True, + ) + if block_dicts is None: + return + client.views_publish(user_id=user_id, view={"type": "home", "blocks": block_dicts}) + helpers.refresh_after_full(hash_key, blocks_key, refresh_at_key, current_hash, block_dicts) + + +def handle_user_mapping_edit_submit( + body: dict, + client: WebClient, + logger: Logger, + context: dict, +) -> None: + """Save the per-user mapping edit and refresh the mapping screen.""" + from handlers._common import _parse_private_metadata + + auth_result = _get_authorized_workspace(body, client, context, "user_mapping_edit_submit") + if not auth_result: + return + user_id, workspace_record = auth_result + + meta = _parse_private_metadata(body) + mapping_id = 
meta.get("mapping_id") + group_id = meta.get("group_id") or 0 + + if not mapping_id: + _logger.warning("user_mapping_edit_submit: missing mapping_id") + return + + mapping = DbManager.get_record(schemas.UserMapping, id=mapping_id) + if not mapping: + return + + values = helpers.safe_get(body, "view", "state", "values") or {} + selected = None + for block_data in values.values(): + for action_data in block_data.values(): + sel = action_data.get("selected_option") + if sel: + selected = sel.get("value") + + now = datetime.now(UTC) + if selected == "__remove__": + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == mapping.id], + { + schemas.UserMapping.target_user_id: None, + schemas.UserMapping.match_method: "none", + schemas.UserMapping.matched_at: now, + }, + ) + _logger.info("user_mapping_removed", extra={"mapping_id": mapping.id}) + elif selected: + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == mapping.id], + { + schemas.UserMapping.target_user_id: selected, + schemas.UserMapping.match_method: "manual", + schemas.UserMapping.matched_at: now, + }, + ) + _logger.info("user_mapping_updated", extra={"mapping_id": mapping.id, "target_user_id": selected}) + + # Invalidate user-mapping caches so next Refresh on that screen does a full rebuild + helpers._cache_delete_prefix(f"user_mapping_hash:{workspace_record.team_id}:") + helpers._cache_delete_prefix(f"user_mapping_blocks:{workspace_record.team_id}:") + helpers._cache_delete_prefix(f"refresh_at:user_mapping:{workspace_record.team_id}:") + + builders.build_user_mapping_screen( + client, + workspace_record, + user_id, + group_id=group_id or None, + context=context, + ) + builders.refresh_home_tab_for_workspace(workspace_record, logger, context=context) diff --git a/syncbot/helpers/__init__.py b/syncbot/helpers/__init__.py new file mode 100644 index 0000000..8d7e131 --- /dev/null +++ b/syncbot/helpers/__init__.py @@ -0,0 +1,156 @@ +"""Shared utility functions 
used throughout SyncBot. + +This package re-exports public names from sub-modules so existing +``from helpers import X`` and ``helpers.X`` references continue to work. +""" + +from helpers._cache import ( + _CACHE, + _CACHE_TTL_SECONDS, + _USER_INFO_CACHE_TTL, + _cache_delete, + _cache_delete_prefix, + _cache_get, + _cache_set, + clear_all_caches, +) +from helpers.core import ( + format_admin_label, + get_request_type, + get_user_id_from_body, + is_backup_visible_for_workspace, + is_db_reset_visible_for_workspace, + is_user_authorized, + safe_get, +) +from helpers.encryption import decrypt_bot_token, encrypt_bot_token +from helpers.files import ( + cleanup_temp_files, + download_public_file, + download_slack_files, + upload_files_to_slack, +) +from helpers.notifications import ( + get_admin_ids, + notify_admins_dm, + notify_admins_dm_blocks, + notify_synced_channels, + purge_stale_soft_deletes, + save_dm_messages_to_group_member, +) +from helpers.oauth import get_oauth_flow +from helpers.refresh import ( + cooldown_message_block, + index_of_block_with_action, + inject_cooldown_message, + refresh_after_full, + refresh_cooldown_check, +) +from helpers.slack_api import ( + _users_info, + delete_message, + get_bot_info_from_event, + get_own_bot_id, + get_own_bot_user_id, + get_post_records, + get_user_info, + post_message, + slack_retry, + update_modal, +) +from helpers.user_matching import ( + _get_user_profile, + _normalize_name, + _refresh_user_directory, + _upsert_single_user_to_directory, + apply_mentioned_users, + find_synced_channel_in_target, + get_display_name_and_icon_for_synced_message, + get_mapped_target_user_id, + normalize_display_name, + parse_mentioned_users, + resolve_channel_references, + resolve_mention_for_workspace, + run_auto_match_for_workspace, + seed_user_mappings, +) +from helpers.workspace import ( + get_federated_workspace, + get_federated_workspace_for_sync, + get_group_members, + get_groups_for_workspace, + get_sync_list, + 
get_workspace_by_id, + get_workspace_record, + resolve_channel_name, + resolve_workspace_name, +) + +__all__ = [ + "_CACHE", + "_CACHE_TTL_SECONDS", + "_USER_INFO_CACHE_TTL", + "_cache_delete", + "_cache_delete_prefix", + "_cache_get", + "_cache_set", + "clear_all_caches", + "_get_user_profile", + "_normalize_name", + "_refresh_user_directory", + "_upsert_single_user_to_directory", + "_users_info", + "apply_mentioned_users", + "cleanup_temp_files", + "cooldown_message_block", + "decrypt_bot_token", + "delete_message", + "download_public_file", + "download_slack_files", + "encrypt_bot_token", + "format_admin_label", + "get_admin_ids", + "get_bot_info_from_event", + "get_federated_workspace", + "get_federated_workspace_for_sync", + "get_group_members", + "get_groups_for_workspace", + "get_display_name_and_icon_for_synced_message", + "find_synced_channel_in_target", + "get_mapped_target_user_id", + "get_oauth_flow", + "normalize_display_name", + "get_own_bot_id", + "get_own_bot_user_id", + "get_post_records", + "get_request_type", + "get_sync_list", + "get_user_id_from_body", + "get_user_info", + "get_workspace_by_id", + "get_workspace_record", + "index_of_block_with_action", + "inject_cooldown_message", + "is_backup_visible_for_workspace", + "is_db_reset_visible_for_workspace", + "is_user_authorized", + "notify_admins_dm", + "notify_admins_dm_blocks", + "notify_synced_channels", + "parse_mentioned_users", + "post_message", + "purge_stale_soft_deletes", + "resolve_channel_name", + "resolve_channel_references", + "refresh_after_full", + "refresh_cooldown_check", + "resolve_mention_for_workspace", + "resolve_workspace_name", + "run_auto_match_for_workspace", + "safe_get", + "save_dm_messages_to_group_member", + "seed_user_mappings", + "slack_retry", + "update_modal", + "upload_files_to_slack", +] diff --git a/syncbot/helpers/_cache.py b/syncbot/helpers/_cache.py new file mode 100644 index 0000000..99c781b --- /dev/null +++ b/syncbot/helpers/_cache.py @@ -0,0 +1,45 @@ 
+"""Lightweight in-process TTL cache. + +Lambda containers are reused across invocations, so a short TTL cache +avoids redundant DB queries for the same sync list within a warm container. +""" + +import time as _time + +_CACHE: dict = {} +_CACHE_TTL_SECONDS = 60 +_USER_INFO_CACHE_TTL = 300 # 5 min for user info lookups + + +def _cache_get(key: str): + """Return a cached value if it exists and has not expired, else *None*.""" + entry = _CACHE.get(key) + if entry and (_time.monotonic() - entry["t"]) < entry.get("ttl", _CACHE_TTL_SECONDS): + return entry["v"] + _CACHE.pop(key, None) + return None + + +def _cache_set(key: str, value, ttl: int = _CACHE_TTL_SECONDS): + """Store *value* in the cache under *key* with an optional TTL (seconds).""" + _CACHE[key] = {"v": value, "t": _time.monotonic(), "ttl": ttl} + + +def _cache_delete(key: str) -> None: + """Remove a single cache entry.""" + _CACHE.pop(key, None) + + +def _cache_delete_prefix(prefix: str) -> int: + """Remove all cache entries whose key starts with *prefix*. Returns count removed.""" + to_remove = [k for k in _CACHE if k.startswith(prefix)] + for k in to_remove: + _CACHE.pop(k, None) + return len(to_remove) + + +def clear_all_caches() -> int: + """Remove every entry from the in-process cache. Returns count removed.""" + count = len(_CACHE) + _CACHE.clear() + return count diff --git a/syncbot/helpers/core.py b/syncbot/helpers/core.py new file mode 100644 index 0000000..d3e7dc7 --- /dev/null +++ b/syncbot/helpers/core.py @@ -0,0 +1,140 @@ +"""Core utility functions used throughout SyncBot.""" + +import logging +import os +from typing import Any + +from slack_sdk.errors import SlackApiError + +import constants +from slack import actions + +_logger = logging.getLogger(__name__) + + +def safe_get(data: Any, *keys: Any) -> Any: + """Safely traverse nested dicts/lists. 
Returns None on missing keys.""" + if not data: + return None + try: + result = data + for k in keys: + if isinstance(k, int) and isinstance(result, list) or result.get(k): + result = result[k] + else: + return None + return result + except (KeyError, AttributeError, IndexError): + return None + + +def get_user_id_from_body(body: dict) -> str | None: + """Extract the acting user's ID from any Slack request payload.""" + return safe_get(body, "user_id") or safe_get(body, "user", "id") + + +def is_user_authorized(client, user_id: str) -> bool: + """Return *True* if the user is allowed to configure SyncBot. + + When ``REQUIRE_ADMIN`` is ``"true"`` (the default), only workspace + admins and owners are authorized. + """ + from .slack_api import _users_info + + require_admin = os.environ.get(constants.REQUIRE_ADMIN, "true").lower() + if require_admin != "true": + return True + + try: + res = _users_info(client, user_id) + except SlackApiError: + _logger.warning(f"Could not verify admin status for user {user_id} — denying access") + return False + + user = safe_get(res, "user") or {} + return bool(user.get("is_admin") or user.get("is_owner")) + + +def is_backup_visible_for_workspace(team_id: str | None) -> bool: + """Return True if full backup/restore UI and handlers are allowed for this workspace. + + Requires PRIMARY_WORKSPACE to be set and to match *team_id*. + When PRIMARY_WORKSPACE is unset, backup/restore is hidden everywhere. + """ + primary = (os.environ.get(constants.PRIMARY_WORKSPACE) or "").strip() + if not primary: + _logger.debug("backup/restore hidden: PRIMARY_WORKSPACE not set") + return False + visible = (team_id or "") == primary + if not visible: + _logger.debug( + "backup/restore hidden: team_id %r does not match PRIMARY_WORKSPACE", + team_id, + ) + return visible + + +def is_db_reset_visible_for_workspace(team_id: str | None) -> bool: + """Return True if the DB reset button/action is allowed for this workspace. 
+ + Requires PRIMARY_WORKSPACE to match *team_id* and ENABLE_DB_RESET to be a truthy + boolean string (``true``, ``1``, ``yes``). Reads env at call time. + """ + primary = (os.environ.get(constants.PRIMARY_WORKSPACE) or "").strip() + if not primary or (team_id or "") != primary: + _logger.debug("DB reset button hidden: PRIMARY_WORKSPACE unset or team_id mismatch") + return False + enabled = (os.environ.get(constants.ENABLE_DB_RESET) or "").strip().lower() + if enabled not in ("true", "1", "yes"): + _logger.debug("DB reset button hidden: ENABLE_DB_RESET not true") + return False + return True + + +def format_admin_label(client, user_id: str, workspace) -> tuple[str, str]: + """Return ``(display_name, full_label)`` for an admin.""" + from .slack_api import get_user_info + from .workspace import resolve_workspace_name + + display_name, _ = get_user_info(client, user_id) + display_name = display_name or "An Admin from another Workspace" + ws_name = resolve_workspace_name(workspace) if workspace else None + if ws_name: + return display_name, f"{display_name} from {ws_name}" + return display_name, display_name + + +_PREFIXED_ACTIONS = ( + actions.CONFIG_REMOVE_FEDERATION_CONNECTION, + actions.CONFIG_LEAVE_GROUP, + actions.CONFIG_ACCEPT_GROUP_REQUEST, + actions.CONFIG_DECLINE_GROUP_REQUEST, + actions.CONFIG_CANCEL_GROUP_REQUEST, + actions.CONFIG_SUBSCRIBE_CHANNEL, + actions.CONFIG_UNPUBLISH_CHANNEL, + actions.CONFIG_USER_MAPPING_EDIT, + actions.CONFIG_REMOVE_SYNC, + actions.CONFIG_RESUME_SYNC, + actions.CONFIG_PAUSE_SYNC, + actions.CONFIG_STOP_SYNC, +) + + +def get_request_type(body: dict) -> tuple[str, str]: + """Classify an incoming Slack request into a ``(category, identifier)`` pair.""" + request_type = safe_get(body, "type") + if request_type == "event_callback": + return ("event_callback", safe_get(body, "event", "type")) + elif request_type == "block_actions": + block_action = safe_get(body, "actions", 0, "action_id") + for prefix in _PREFIXED_ACTIONS: + if 
block_action == prefix or block_action.startswith(prefix + "_"): + block_action = prefix + break + return ("block_actions", block_action) + elif request_type == "view_submission": + return ("view_submission", safe_get(body, "view", "callback_id")) + elif not request_type and "command" in body: + return ("command", safe_get(body, "command")) + else: + return ("unknown", "unknown") diff --git a/syncbot/helpers/encryption.py b/syncbot/helpers/encryption.py new file mode 100644 index 0000000..bb7429c --- /dev/null +++ b/syncbot/helpers/encryption.py @@ -0,0 +1,72 @@ +"""Bot-token encryption / decryption using Fernet (AES-128-CBC + HMAC-SHA256). + +The TOKEN_ENCRYPTION_KEY env var is stretched to a 32-byte key using +PBKDF2-HMAC-SHA256 with 600,000 iterations. The derived Fernet instance +is cached so the expensive KDF runs at most once per key per process. +""" + +import base64 +import functools +import logging +import os + +from cryptography.fernet import Fernet, InvalidToken + +import constants + +_logger = logging.getLogger(__name__) + +_PBKDF2_ITERATIONS = 600_000 +_PBKDF2_SALT_PREFIX = b"syncbot-fernet-v1" + + +@functools.lru_cache(maxsize=2) +def _get_fernet(key: str) -> Fernet: + """Derive a Fernet cipher from an arbitrary passphrase via PBKDF2.""" + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC + + salt = _PBKDF2_SALT_PREFIX + key.encode()[:16] + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=_PBKDF2_ITERATIONS, + ) + derived = kdf.derive(key.encode()) + return Fernet(base64.urlsafe_b64encode(derived)) + + +def _encryption_enabled() -> bool: + """Return *True* if bot-token encryption is active.""" + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") + return bool(key) and key != "123" + + +def encrypt_bot_token(token: str) -> str: + """Encrypt a bot token before storing it in the database.""" + if not _encryption_enabled(): + return token + key = 
os.environ[constants.TOKEN_ENCRYPTION_KEY] + return _get_fernet(key).encrypt(token.encode()).decode() + + +def decrypt_bot_token(encrypted: str) -> str: + """Decrypt a bot token read from the database. + + Raises on failure when encryption is enabled. + """ + if not _encryption_enabled(): + return encrypted + key = os.environ[constants.TOKEN_ENCRYPTION_KEY] + try: + return _get_fernet(key).decrypt(encrypted.encode()).decode() + except InvalidToken: + _logger.error( + "Bot token decryption failed — refusing to use the token. " + "If you recently enabled encryption, run " + "db/migrate_002_encrypt_tokens.py to encrypt existing tokens." + ) + raise ValueError( + "Bot token decryption failed. The token may be plaintext (not yet migrated) or tampered with." + ) from None diff --git a/syncbot/helpers/export_import.py b/syncbot/helpers/export_import.py new file mode 100644 index 0000000..43b5352 --- /dev/null +++ b/syncbot/helpers/export_import.py @@ -0,0 +1,576 @@ +"""Backup/restore and data migration export/import helpers. + +Full-instance backup: dump all tables as JSON with HMAC for tampering detection. +Data migration: workspace-scoped export with Ed25519 signature; import with replace mode. 
+""" + +import hashlib +import hmac +import json +import logging +import os +from datetime import datetime +from decimal import Decimal +from typing import Any + +from sqlalchemy import MetaData, Table, delete, select + +import constants +from db import DbManager, get_engine, schemas + +_logger = logging.getLogger(__name__) + +BACKUP_VERSION = 1 +MIGRATION_VERSION = 1 +_RAW_BACKUP_TABLES = ("slack_bots", "slack_installations", "slack_oauth_states") +_DATETIME_COLUMNS = frozenset({ + "bot_token_expires_at", + "user_token_expires_at", + "installed_at", + "expire_at", +}) + + +def _dump_raw_table(table_name: str) -> list[dict]: + """Return all rows from a non-ORM table as dictionaries (dialect-neutral via reflection).""" + engine = get_engine() + meta = MetaData() + table = Table(table_name, meta, autoload_with=engine) + with engine.connect() as conn: + rows = conn.execute(select(table)).mappings().all() + return [dict(row) for row in rows] + + +def _restore_raw_table(table_name: str, rows: list[dict]) -> None: + """Replace table contents for a non-ORM table from backup rows (dialect-neutral).""" + engine = get_engine() + meta = MetaData() + table = Table(table_name, meta, autoload_with=engine) + with engine.begin() as conn: + conn.execute(delete(table)) + for row in rows: + if not row: + continue + parsed: dict[str, Any] = {} + for key, value in row.items(): + if key not in table.c: + continue + if isinstance(value, str) and key in _DATETIME_COLUMNS: + try: + parsed[key] = datetime.fromisoformat(value.replace("Z", "+00:00")) + except ValueError: + parsed[key] = value + else: + parsed[key] = value + if parsed: + conn.execute(table.insert().values(**parsed)) + + +def _json_serializer(obj: Any) -> Any: + """Convert datetime and Decimal for JSON.""" + if isinstance(obj, datetime): + return obj.isoformat() + if isinstance(obj, Decimal): + return float(obj) + raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable") + + +def canonical_json_dumps(obj: 
dict) -> bytes: + """Serialize to canonical JSON (sort_keys, no extra whitespace) for signing/HMAC.""" + return json.dumps( + obj, + sort_keys=True, + separators=(",", ":"), + default=_json_serializer, + ).encode("utf-8") + + +def _compute_encryption_key_hash() -> str | None: + """SHA-256 hex of TOKEN_ENCRYPTION_KEY, or None if unset.""" + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") + if not key or key == "123": + return None + return hashlib.sha256(key.encode()).hexdigest() + + +def _compute_backup_hmac(payload_without_hmac: dict) -> str: + """HMAC-SHA256 of canonical JSON of payload (excluding hmac field), keyed by TOKEN_ENCRYPTION_KEY.""" + key = os.environ.get(constants.TOKEN_ENCRYPTION_KEY, "") + if not key: + key = "" + raw = canonical_json_dumps(payload_without_hmac) + return hmac.new(key.encode(), raw, hashlib.sha256).hexdigest() + + +def _records_to_list(records: list, cls: type) -> list[dict]: + """Convert ORM records to list of dicts with serializable values.""" + out = [] + for r in records: + d = {} + for k in cls._get_column_keys(): + v = getattr(r, k) + if isinstance(v, datetime): + v = v.isoformat() + elif isinstance(v, Decimal): + v = float(v) + d[k] = v + out.append(d) + return out + + +# --------------------------------------------------------------------------- +# Full-instance backup +# --------------------------------------------------------------------------- + +def build_full_backup() -> dict: + """Build full-instance backup payload (all tables, version, exported_at, encryption_key_hash, hmac).""" + payload = { + "version": BACKUP_VERSION, + "exported_at": datetime.utcnow().isoformat() + "Z", + "encryption_key_hash": _compute_encryption_key_hash(), + } + tables = [ + ("workspaces", schemas.Workspace), + ("workspace_groups", schemas.WorkspaceGroup), + ("workspace_group_members", schemas.WorkspaceGroupMember), + ("syncs", schemas.Sync), + ("sync_channels", schemas.SyncChannel), + ("post_meta", schemas.PostMeta), + 
("user_directory", schemas.UserDirectory), + ("user_mappings", schemas.UserMapping), + ("federated_workspaces", schemas.FederatedWorkspace), + ("instance_keys", schemas.InstanceKey), + ] + for table_name, cls in tables: + records = DbManager.find_records(cls, []) + payload[table_name] = _records_to_list(records, cls) + for table_name in _RAW_BACKUP_TABLES: + payload[table_name] = _dump_raw_table(table_name) + + payload["hmac"] = _compute_backup_hmac({k: v for k, v in payload.items() if k != "hmac"}) + return payload + + +def verify_backup_hmac(data: dict) -> bool: + """Return True if HMAC in data matches recomputed HMAC (excluding hmac field).""" + stored = data.get("hmac") + if not stored: + return False + payload_without_hmac = {k: v for k, v in data.items() if k != "hmac"} + expected = _compute_backup_hmac(payload_without_hmac) + return hmac.compare_digest(stored, expected) # noqa: S324 + + +def verify_backup_encryption_key(data: dict) -> bool: + """Return True if current encryption key hash matches backup's.""" + current = _compute_encryption_key_hash() + backup_hash = data.get("encryption_key_hash") + if backup_hash is None and current is None: + return True + if backup_hash is None or current is None: + return False + return hmac.compare_digest(current, backup_hash) # noqa: S324 + + +def restore_full_backup( + data: dict, + *, + skip_hmac_check: bool = False, + skip_encryption_key_check: bool = False, +) -> list[str]: + """Restore full backup into DB. Inserts in FK order. Returns list of team_ids for cache invalidation. + + Caller must have validated version and structure. Does not truncate tables; assumes empty or + intentional overwrite (e.g. restore after rebuild). 
+ """ + team_ids: list[str] = [] + tables = [ + "slack_bots", + "slack_installations", + "slack_oauth_states", + "workspaces", + "workspace_groups", + "workspace_group_members", + "syncs", + "sync_channels", + "post_meta", + "user_directory", + "user_mappings", + "federated_workspaces", + "instance_keys", + ] + table_to_schema = { + "workspaces": schemas.Workspace, + "workspace_groups": schemas.WorkspaceGroup, + "workspace_group_members": schemas.WorkspaceGroupMember, + "syncs": schemas.Sync, + "sync_channels": schemas.SyncChannel, + "post_meta": schemas.PostMeta, + "user_directory": schemas.UserDirectory, + "user_mappings": schemas.UserMapping, + "federated_workspaces": schemas.FederatedWorkspace, + "instance_keys": schemas.InstanceKey, + } + datetime_keys = {"created_at", "updated_at", "deleted_at", "joined_at", "matched_at"} + for table_name in tables: + rows = data.get(table_name, []) + if table_name in _RAW_BACKUP_TABLES: + _restore_raw_table(table_name, rows) + continue + cls = table_to_schema[table_name] + for row in rows: + kwargs = {} + for k, v in row.items(): + if v is None: + kwargs[k] = None + elif isinstance(v, str) and k in datetime_keys: + try: + kwargs[k] = datetime.fromisoformat(v.replace("Z", "+00:00")) + except ValueError: + kwargs[k] = v + elif k == "ts" and v is not None: + kwargs[k] = Decimal(str(v)) + else: + kwargs[k] = v + rec = cls(**kwargs) + DbManager.merge_record(rec) + if table_name == "workspaces" and rec.team_id: + team_ids.append(rec.team_id) + return team_ids + + +# --------------------------------------------------------------------------- +# Cache invalidation after restore/import +# --------------------------------------------------------------------------- + +def invalidate_home_tab_caches_for_team(team_id: str) -> None: + """Clear home_tab_hash and home_tab_blocks for a team so next Refresh does full rebuild.""" + from helpers._cache import _cache_delete_prefix + _cache_delete_prefix(f"home_tab_hash:{team_id}") + 
_cache_delete_prefix(f"home_tab_blocks:{team_id}") + + +def invalidate_home_tab_caches_for_all_teams(team_ids: list[str]) -> None: + """Clear home tab caches for each team_id (e.g. after full restore).""" + for tid in team_ids: + invalidate_home_tab_caches_for_team(tid) + + +def invalidate_sync_list_cache_for_channel(channel_id: str) -> None: + """Clear get_sync_list cache for a channel.""" + from helpers._cache import _cache_delete + _cache_delete(f"sync_list:{channel_id}") + + +# --------------------------------------------------------------------------- +# Data migration export (workspace-scoped) +# --------------------------------------------------------------------------- + +def build_migration_export(workspace_id: int, include_source_instance: bool = True) -> dict: + """Build workspace-scoped migration JSON. Optionally sign with Ed25519 and include source_instance.""" + workspace = DbManager.get_record(schemas.Workspace, workspace_id) + if not workspace or workspace.deleted_at: + raise ValueError("Workspace not found") + + team_id = workspace.team_id + workspace_name = workspace.workspace_name or "" + + # Groups W is in + memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + groups_data = [] + for membership in memberships: + g = DbManager.get_record(schemas.WorkspaceGroup, membership.group_id) + if g: + groups_data.append({"name": g.name, "role": membership.role}) + + # Syncs that have at least one SyncChannel for W + sync_channels_w = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.workspace_id == workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + sync_ids = {sync_channel.sync_id for sync_channel in sync_channels_w} + syncs_data = [] + sync_channels_data = [] + post_meta_by_key = {} + + for sync_id in sync_ids: + sync = 
DbManager.get_record(schemas.Sync, sync_id) + if not sync: + continue + pub_team = None + tgt_team = None + if sync.publisher_workspace_id: + publisher_ws = DbManager.get_record(schemas.Workspace, sync.publisher_workspace_id) + if publisher_ws: + pub_team = publisher_ws.team_id + if sync.target_workspace_id: + tw = DbManager.get_record(schemas.Workspace, sync.target_workspace_id) + if tw: + tgt_team = tw.team_id + syncs_data.append({ + "title": sync.title, + "sync_mode": sync.sync_mode or "group", + "publisher_team_id": pub_team, + "target_team_id": tgt_team, + "is_publisher": sync.publisher_workspace_id == workspace_id, + }) + for sync_channel in sync_channels_w: + if sync_channel.sync_id != sync_id: + continue + sync_channels_data.append({ + "sync_title": sync.title, + "channel_id": sync_channel.channel_id, + "status": sync_channel.status or "active", + }) + key = f"{sync.title}:{sync_channel.channel_id}" + post_metas = DbManager.find_records( + schemas.PostMeta, + [schemas.PostMeta.sync_channel_id == sync_channel.id], + ) + post_meta_by_key[key] = [{"post_id": post_meta.post_id, "ts": float(post_meta.ts)} for post_meta in post_metas] + + # user_directory for W + ud_records = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.deleted_at.is_(None), + ], + ) + user_directory_data = [] + for u in ud_records: + user_directory_data.append({ + "slack_user_id": u.slack_user_id, + "email": u.email, + "real_name": u.real_name, + "display_name": u.display_name, + "normalized_name": u.normalized_name, + "updated_at": u.updated_at.isoformat() if u.updated_at else None, + }) + + # user_mappings involving W (export with team_id for other side) + um_records = DbManager.find_records( + schemas.UserMapping, + [ + (schemas.UserMapping.source_workspace_id == workspace_id) | (schemas.UserMapping.target_workspace_id == workspace_id), + ], + ) + user_mappings_data = [] + for um in um_records: + src_ws = 
DbManager.get_record(schemas.Workspace, um.source_workspace_id) if um.source_workspace_id else None + tgt_ws = DbManager.get_record(schemas.Workspace, um.target_workspace_id) if um.target_workspace_id else None + user_mappings_data.append({ + "source_team_id": src_ws.team_id if src_ws else None, + "target_team_id": tgt_ws.team_id if tgt_ws else None, + "source_user_id": um.source_user_id, + "target_user_id": um.target_user_id, + "match_method": um.match_method, + }) + + payload = { + "version": MIGRATION_VERSION, + "exported_at": datetime.utcnow().isoformat() + "Z", + "workspace": {"team_id": team_id, "workspace_name": workspace_name}, + "groups": groups_data, + "syncs": syncs_data, + "sync_channels": sync_channels_data, + "post_meta": post_meta_by_key, + "user_directory": user_directory_data, + "user_mappings": user_mappings_data, + } + + if include_source_instance: + from federation import core as federation + try: + url = federation.get_public_url() + instance_id = federation.get_instance_id() + _, public_key_pem = federation.get_or_create_instance_keypair() + code = federation.generate_federation_code(webhook_url=url, instance_id=instance_id, public_key=public_key_pem) + payload["source_instance"] = { + "webhook_url": url, + "instance_id": instance_id, + "public_key": public_key_pem, + "connection_code": code, + } + except Exception as e: + _logger.warning("build_migration_export: could not add source_instance: %s", e) + + # Sign with Ed25519 (exclude signature from signed bytes; include signed_at) + try: + from federation import core as federation + payload["signed_at"] = datetime.utcnow().isoformat() + "Z" + to_sign = {k: v for k, v in payload.items() if k != "signature"} + raw = canonical_json_dumps(to_sign).decode("utf-8") + payload["signature"] = federation.sign_body(raw) + except Exception as e: + _logger.warning("build_migration_export: could not sign: %s", e) + + return payload + + +def verify_migration_signature(data: dict) -> bool: + """Verify Ed25519 
signature using source_instance.public_key. Returns False if no signature or invalid.""" + sig = data.get("signature") + source = data.get("source_instance") + if not sig or not source: + return False + public_key = source.get("public_key") + if not public_key: + return False + to_verify = {k: v for k, v in data.items() if k != "signature"} + raw = canonical_json_dumps(to_verify).decode("utf-8") + from federation import core as federation + return federation.verify_body(raw, sig, public_key) + + +def import_migration_data( + data: dict, + workspace_id: int, + group_id: int, + *, + team_id_to_workspace_id: dict[str, int], +) -> None: + """Import migration payload into DB (replace mode). Caller must have resolved federated group and team_id_to_workspace_id on B. + + - Replace mode: soft-delete W's SyncChannels in this group and their PostMeta, then create from export. + - team_id_to_workspace_id: map export team_id -> B's workspace id (for publisher/target and user_mappings). + """ + from datetime import UTC + + syncs_export = data.get("syncs", []) + sync_channels_export = data.get("sync_channels", []) + post_meta_export = data.get("post_meta", {}) + user_directory_export = data.get("user_directory", []) + user_mappings_export = data.get("user_mappings", []) + workspace_export = data.get("workspace", {}) + export_team_id = workspace_export.get("team_id") + + # Replace mode: find syncs in group, then SyncChannels for this workspace in those syncs + syncs_in_group = DbManager.find_records(schemas.Sync, [schemas.Sync.group_id == group_id]) + sync_ids_in_group = [s.id for s in syncs_in_group] + if sync_ids_in_group: + channels_to_remove = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id.in_(sync_ids_in_group), + schemas.SyncChannel.workspace_id == workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + now = datetime.now(UTC) + for sync_channel in channels_to_remove: + DbManager.delete_records( + schemas.PostMeta, + 
[schemas.PostMeta.sync_channel_id == sync_channel.id], + ) + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == sync_channel.id], + {schemas.SyncChannel.deleted_at: now}, + ) + + # Build sync title -> sync_id (B) for this group (create or reuse) + title_to_sync = {} + for s in syncs_export: + title = s.get("title") + if not title: + continue + existing = DbManager.find_records( + schemas.Sync, + [schemas.Sync.group_id == group_id, schemas.Sync.title == title], + ) + if existing: + title_to_sync[title] = existing[0].id + else: + pub_team = s.get("publisher_team_id") + tgt_team = s.get("target_team_id") + is_publisher = s.get("is_publisher") + pub_ws_id = (workspace_id if is_publisher else team_id_to_workspace_id.get(pub_team)) if pub_team else None + tgt_ws_id = (workspace_id if tgt_team == export_team_id else team_id_to_workspace_id.get(tgt_team)) if tgt_team else None + new_sync = schemas.Sync( + title=title, + group_id=group_id, + sync_mode=s.get("sync_mode", "group"), + publisher_workspace_id=pub_ws_id, + target_workspace_id=tgt_ws_id, + ) + DbManager.create_record(new_sync) + title_to_sync[title] = new_sync.id + + # Create SyncChannels and PostMeta + for sc_entry in sync_channels_export: + sync_title = sc_entry.get("sync_title") + channel_id = sc_entry.get("channel_id") + status = sc_entry.get("status", "active") + sync_id = title_to_sync.get(sync_title) + if not sync_id: + continue + new_sync_channel = schemas.SyncChannel( + sync_id=sync_id, + workspace_id=workspace_id, + channel_id=channel_id, + status=status, + created_at=datetime.now(UTC), + ) + DbManager.create_record(new_sync_channel) + key = f"{sync_title}:{channel_id}" + for post_meta in post_meta_export.get(key, []): + DbManager.create_record(schemas.PostMeta( + post_id=post_meta["post_id"], + sync_channel_id=new_sync_channel.id, + ts=Decimal(str(post_meta["ts"])), + )) + + # user_directory for W (replace: remove existing for this workspace then insert) + 
DbManager.delete_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_id], + ) + for u in user_directory_export: + DbManager.create_record(schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=u["slack_user_id"], + email=u.get("email"), + real_name=u.get("real_name"), + display_name=u.get("display_name"), + normalized_name=u.get("normalized_name"), + updated_at=datetime.fromisoformat(u["updated_at"].replace("Z", "+00:00")) if u.get("updated_at") else datetime.now(UTC), + )) + + # user_mappings where both source and target workspace exist on B + for um in user_mappings_export: + src_team = um.get("source_team_id") + tgt_team = um.get("target_team_id") + src_ws_id = team_id_to_workspace_id.get(src_team) if src_team else None + tgt_ws_id = team_id_to_workspace_id.get(tgt_team) if tgt_team else None + if not src_ws_id or not tgt_ws_id: + continue + existing = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == src_ws_id, + schemas.UserMapping.source_user_id == um["source_user_id"], + schemas.UserMapping.target_workspace_id == tgt_ws_id, + ], + ) + if existing: + continue + DbManager.create_record(schemas.UserMapping( + source_workspace_id=src_ws_id, + source_user_id=um["source_user_id"], + target_workspace_id=tgt_ws_id, + target_user_id=um.get("target_user_id"), + match_method=um.get("match_method", "none"), + matched_at=datetime.now(UTC), + group_id=group_id, + )) diff --git a/syncbot/helpers/files.py b/syncbot/helpers/files.py new file mode 100644 index 0000000..87c8931 --- /dev/null +++ b/syncbot/helpers/files.py @@ -0,0 +1,204 @@ +"""File upload/download helpers for message sync.""" + +import contextlib +import logging +import os +import re +import time as _time +import uuid +from logging import Logger + +import requests +from slack_sdk import WebClient + +_logger = logging.getLogger(__name__) + +_DOWNLOAD_TIMEOUT = 30 # seconds +_MAX_FILE_BYTES = 100 * 1024 * 1024 # 100 MB 
+_STREAM_CHUNK = 8192 + + +def cleanup_temp_files(photos: list[dict] | None, direct_files: list[dict] | None) -> None: + """Remove temporary files created during message sync.""" + for item in photos or []: + path = item.get("path") + if path: + with contextlib.suppress(OSError): + os.remove(path) + for item in direct_files or []: + path = item.get("path") + if path: + with contextlib.suppress(OSError): + os.remove(path) + + +def _safe_file_parts(f: dict) -> tuple[str, str, str]: + """Return ``(safe_id, safe_ext, default_name)`` with path-safe characters only.""" + safe_id = re.sub(r"[^a-zA-Z0-9_-]", "", f.get("id", "file")) + safe_ext = re.sub(r"[^a-zA-Z0-9]", "", f.get("filetype", "bin")) + return safe_id, safe_ext, f"{safe_id}.{safe_ext}" + + +def _download_to_file(url: str, file_path: str, headers: dict | None = None) -> None: + """Stream a URL to disk, aborting if the response exceeds *_MAX_FILE_BYTES*. + + Removes the partial file on any failure so /tmp doesn't fill up. + """ + try: + with requests.get(url, headers=headers, timeout=_DOWNLOAD_TIMEOUT, stream=True) as r: + r.raise_for_status() + written = 0 + with open(file_path, "wb") as fh: + for chunk in r.iter_content(chunk_size=_STREAM_CHUNK): + written += len(chunk) + if written > _MAX_FILE_BYTES: + raise ValueError(f"File exceeds {_MAX_FILE_BYTES} byte limit") + fh.write(chunk) + except Exception: + with contextlib.suppress(OSError): + os.remove(file_path) + raise + + +def download_public_file(url: str, logger: Logger) -> dict | None: + """Download a file from a public URL (e.g. 
GIPHY) to /tmp.""" + try: + content_type = "image/gif" + file_name = f"attachment_{uuid.uuid4().hex[:8]}.gif" + file_path = f"/tmp/{file_name}" + + with requests.get(url, timeout=_DOWNLOAD_TIMEOUT, stream=True) as r: + r.raise_for_status() + content_type = r.headers.get("content-type", "image/gif").split(";")[0] + ext = content_type.split("/")[-1] if "/" in content_type else "gif" + file_name = f"attachment_{uuid.uuid4().hex[:8]}.{ext}" + file_path = f"/tmp/{file_name}" + written = 0 + with open(file_path, "wb") as fh: + for chunk in r.iter_content(chunk_size=_STREAM_CHUNK): + written += len(chunk) + if written > _MAX_FILE_BYTES: + raise ValueError(f"File exceeds {_MAX_FILE_BYTES} byte limit") + fh.write(chunk) + + return {"path": file_path, "name": file_name, "mimetype": content_type} + except Exception as e: + logger.warning(f"download_public_file: failed for {url}: {e}") + return None + + +def download_slack_files( + files: list[dict], client: WebClient, logger: Logger +) -> list[dict]: + """Download files from Slack to /tmp for direct re-upload.""" + downloaded: list[dict] = [] + auth_headers = {"Authorization": f"Bearer {client.token}"} + + for f in files: + try: + url = f.get("url_private") + if not url: + continue + + safe_id, safe_ext, default_name = _safe_file_parts(f) + file_name = f.get("name") or default_name + file_path = f"/tmp/{safe_id}.{safe_ext}" + + _download_to_file(url, file_path, headers=auth_headers) + + downloaded.append({ + "path": file_path, + "name": file_name, + "mimetype": f.get("mimetype", "application/octet-stream"), + }) + except Exception as e: + logger.error(f"download_slack_files: failed for {f.get('id')}: {e}") + return downloaded + + +def upload_files_to_slack( + bot_token: str, + channel_id: str, + files: list[dict], + initial_comment: str | None = None, + thread_ts: str | None = None, +) -> tuple[dict | None, str | None]: + """Upload one or more local files directly to a Slack channel.""" + if not files: + return None, None + + 
slack_client = WebClient(bot_token) + file_uploads = [] + for f in files: + file_uploads.append({ + "file": f["path"], + "filename": f["name"], + }) + + kwargs: dict = {"channel": channel_id} + if initial_comment: + kwargs["initial_comment"] = initial_comment + if thread_ts: + kwargs["thread_ts"] = thread_ts + + try: + if len(file_uploads) == 1: + kwargs["file"] = file_uploads[0]["file"] + kwargs["filename"] = file_uploads[0]["filename"] + res = slack_client.files_upload_v2(**kwargs) + else: + kwargs["file_uploads"] = file_uploads + res = slack_client.files_upload_v2(**kwargs) + + msg_ts = _extract_file_message_ts(slack_client, res, channel_id, thread_ts=thread_ts) + return res, msg_ts + except Exception as e: + _logger.warning(f"upload_files_to_slack: failed for channel {channel_id}: {e}") + return None, None + + +def _extract_file_message_ts( + client: WebClient, upload_response, channel_id: str, + thread_ts: str | None = None, +) -> str | None: + """Extract the message ts created by a file upload.""" + if not upload_response: + return None + + file_id = None + with contextlib.suppress(KeyError, TypeError, IndexError): + file_id = upload_response["file"]["id"] + + if not file_id: + try: + files_list = upload_response["files"] + if files_list and len(files_list) > 0: + file_id = files_list[0]["id"] if isinstance(files_list[0], dict) else files_list[0].get("id") + except (KeyError, TypeError, IndexError): + pass + + if not file_id: + _logger.warning("_extract_file_message_ts: could not find file_id in upload response") + return None + + for attempt in range(4): + try: + info_resp = client.files_info(file=file_id) + shares = info_resp["file"]["shares"] + for share_type in ("public", "private"): + channel_shares = shares.get(share_type, {}).get(channel_id, []) + if channel_shares: + ts = channel_shares[0].get("ts") + _logger.info("_extract_file_message_ts: success", + extra={"file_id": file_id, "ts": ts, "attempt": attempt}) + return ts + except (KeyError, TypeError, 
IndexError): + pass + except Exception as e: + _logger.warning(f"_extract_file_message_ts: files.info error (attempt {attempt}): {e}") + + if attempt < 3: + _time.sleep(1.5) + + _logger.warning(f"_extract_file_message_ts: could not resolve ts for file {file_id} after retries") + return None diff --git a/syncbot/helpers/notifications.py b/syncbot/helpers/notifications.py new file mode 100644 index 0000000..4dc472c --- /dev/null +++ b/syncbot/helpers/notifications.py @@ -0,0 +1,226 @@ +"""Admin DM notifications and channel notifications.""" + +import logging +from datetime import UTC, datetime + +from slack_sdk import WebClient +from sqlalchemy.exc import ProgrammingError + +import constants +from db import DbManager, schemas +from helpers._cache import _cache_get, _cache_set +from helpers.core import safe_get +from helpers.encryption import decrypt_bot_token + +_logger = logging.getLogger(__name__) + + +def get_admin_ids( + client: WebClient, + *, + team_id: str | None = None, + context: dict | None = None, +) -> list[str]: + """Return a list of admin/owner user IDs for the workspace behind *client*. + + If *context* and *team_id* are provided, uses request-scoped cache to avoid + repeated users.list for the same workspace within one request. 
+ """ + if context is not None and team_id: + cache = context.setdefault("_admin_ids", {}) + if team_id in cache: + return cache[team_id] + + from helpers.user_matching import _users_list_page + + cursor = "" + admin_ids: list[str] = [] + + while True: + try: + res = _users_list_page(client, cursor=cursor) + except Exception as e: + _logger.warning(f"get_admin_ids: failed to list users: {e}") + break + + members = safe_get(res, "members") or [] + for member in members: + if member.get("is_bot") or member.get("id") == "USLACKBOT": + continue + if member.get("deleted"): + continue + if member.get("is_admin") or member.get("is_owner"): + admin_ids.append(member["id"]) + + next_cursor = safe_get(res, "response_metadata", "next_cursor") + if not next_cursor: + break + cursor = next_cursor + + if context is not None and team_id: + context.setdefault("_admin_ids", {})[team_id] = admin_ids + return admin_ids + + +def notify_admins_dm( + client: WebClient, + message: str, + exclude_user_ids: set[str] | None = None, + blocks: list[dict] | None = None, +) -> int: + """Send a DM to all workspace admins/owners. Best-effort. + + Returns the number of admins successfully notified. + """ + notified = 0 + kwargs: dict = {"text": message} + if blocks: + kwargs["blocks"] = blocks + for user_id in get_admin_ids(client): + if exclude_user_ids and user_id in exclude_user_ids: + continue + try: + dm = client.conversations_open(users=[user_id]) + channel_id = safe_get(dm, "channel", "id") + if channel_id: + client.chat_postMessage(channel=channel_id, **kwargs) + notified += 1 + except Exception as e: + _logger.warning(f"notify_admins_dm: failed to DM user {user_id}: {e}") + + return notified + + +def notify_admins_dm_blocks( + client: WebClient, + text: str, + blocks: list[dict], +) -> list[dict]: + """Send a Block Kit DM to all workspace admins/owners. + + Returns a list of ``{"channel": ..., "ts": ...}`` dicts for each + successfully sent DM (used for later message updates). 
+ """ + sent: list[dict] = [] + for user_id in get_admin_ids(client): + try: + dm = client.conversations_open(users=[user_id]) + channel_id = safe_get(dm, "channel", "id") + if channel_id: + res = client.chat_postMessage(channel=channel_id, text=text, blocks=blocks) + msg_ts = safe_get(res, "ts") + if msg_ts: + sent.append({"channel": channel_id, "ts": msg_ts}) + except Exception as e: + _logger.warning(f"notify_admins_dm_blocks: failed to DM user {user_id}: {e}") + + return sent + + +def save_dm_messages_to_group_member(member_id: int, dm_entries: list[dict]) -> None: + """Persist DM channel/ts metadata on a group member record for later updates.""" + import json as _json + + if not dm_entries: + return + existing = DbManager.get_record(schemas.WorkspaceGroupMember, id=member_id) + if not existing: + return + try: + prev = _json.loads(existing.dm_messages) if existing.dm_messages else [] + except (ValueError, TypeError): + prev = [] + prev.extend(dm_entries) + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == member_id], + {schemas.WorkspaceGroupMember.dm_messages: _json.dumps(prev)}, + ) + + +def notify_synced_channels(client: WebClient, channel_ids: list[str], message: str) -> int: + """Post a message to a list of channels. Best-effort.""" + notified = 0 + for channel_id in channel_ids: + try: + client.chat_postMessage(channel=channel_id, text=message) + notified += 1 + except Exception as e: + _logger.warning(f"notify_synced_channels: failed to post to {channel_id}: {e}") + return notified + + +def purge_stale_soft_deletes() -> int: + """Permanently delete workspaces that have been soft-deleted beyond the retention period. + + Returns 0 without raising if the schema is missing (e.g. fresh DB before Alembic bootstrap). 
+ """ + from helpers.workspace import get_workspace_by_id + + cache_key = "purge_check" + if _cache_get(cache_key): + return 0 + _cache_set(cache_key, True, ttl=86400) + + retention_days = constants.SOFT_DELETE_RETENTION_DAYS + cutoff = datetime.now(UTC) - __import__("datetime").timedelta(days=retention_days) + + try: + stale_workspaces = DbManager.find_records( + schemas.Workspace, + [ + schemas.Workspace.deleted_at.isnot(None), + schemas.Workspace.deleted_at < cutoff, + ], + ) + except ProgrammingError as e: + _logger.debug("purge_stale_soft_deletes: schema not ready (%s), skipping", e.orig if hasattr(e, "orig") else e) + return 0 + + if not stale_workspaces: + return 0 + + purged = 0 + for ws in stale_workspaces: + ws_name = ws.workspace_name or ws.team_id or f"Workspace {ws.id}" + + group_memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.workspace_id == ws.id], + ) + + notified_ws: set[int] = set() + for membership in group_memberships: + other_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == membership.group_id, + schemas.WorkspaceGroupMember.workspace_id != ws.id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + for member in other_members: + if not member.workspace_id or member.workspace_id in notified_ws: + continue + member_ws = get_workspace_by_id(member.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at is not None: + continue + notified_ws.add(member.workspace_id) + try: + member_client = WebClient(token=decrypt_bot_token(member_ws.bot_token)) + notify_admins_dm( + member_client, + f":wastebasket: *{ws_name}* has been permanently removed " + f"after {retention_days} days of inactivity.", + ) + except Exception as e: + _logger.warning(f"purge: failed to notify member {member.workspace_id}: {e}") + + DbManager.delete_records(schemas.Workspace, 
[schemas.Workspace.id == ws.id]) + purged += 1 + + if purged: + _logger.info("purge_stale_soft_deletes_complete", extra={"purged": purged}) + + return purged diff --git a/syncbot/helpers/oauth.py b/syncbot/helpers/oauth.py new file mode 100644 index 0000000..9ecedc3 --- /dev/null +++ b/syncbot/helpers/oauth.py @@ -0,0 +1,68 @@ +"""Slack OAuth flow construction. + +Bot scopes: :envvar:`SLACK_BOT_SCOPES` (``slack_manifest_scopes.BOT_SCOPES`` / manifest bot). +User scopes: :envvar:`SLACK_USER_SCOPES` (defaults to ``USER_SCOPES`` when unset). +Requesting user scopes that do not match the Slack app manifest causes ``invalid_scope`` on install. +""" + +import logging +import os + +from slack_bolt.oauth import OAuthFlow +from slack_bolt.oauth.oauth_settings import OAuthSettings +from slack_sdk.oauth.installation_store.sqlalchemy import SQLAlchemyInstallationStore +from slack_sdk.oauth.state_store.sqlalchemy import SQLAlchemyOAuthStateStore + +import constants +from slack_manifest_scopes import USER_SCOPES + +_logger = logging.getLogger(__name__) + +_OAUTH_STATE_EXPIRATION_SECONDS = 600 + + +def get_oauth_flow(): + """Build the Slack OAuth flow using SQLAlchemy-backed stores. + + Uses the same database engine as the rest of the app. Works for both + local development and production (Lambda). If OAuth credentials are not + set and LOCAL_DEVELOPMENT is true, returns None (single-workspace mode). 
+ """ + client_id = os.environ.get(constants.SLACK_CLIENT_ID, "").strip() + client_secret = os.environ.get(constants.SLACK_CLIENT_SECRET, "").strip() + scopes_raw = os.environ.get(constants.SLACK_BOT_SCOPES, "").strip() + user_scopes_raw = os.environ.get(constants.SLACK_USER_SCOPES, "").strip() + + if constants.LOCAL_DEVELOPMENT and not (client_id and client_secret and scopes_raw): + _logger.info("OAuth credentials not set — running in single-workspace mode") + return None + + from db import get_engine + + engine = get_engine() + installation_store = SQLAlchemyInstallationStore( + client_id=client_id, + engine=engine, + ) + state_store = SQLAlchemyOAuthStateStore( + expiration_seconds=_OAUTH_STATE_EXPIRATION_SECONDS, + engine=engine, + ) + + bot_scopes = [s.strip() for s in scopes_raw.split(",") if s.strip()] + user_scopes = ( + [s.strip() for s in user_scopes_raw.split(",") if s.strip()] + if user_scopes_raw + else list(USER_SCOPES) + ) + + return OAuthFlow( + settings=OAuthSettings( + client_id=client_id, + client_secret=client_secret, + scopes=bot_scopes, + user_scopes=user_scopes, + installation_store=installation_store, + state_store=state_store, + ), + ) diff --git a/syncbot/helpers/refresh.py b/syncbot/helpers/refresh.py new file mode 100644 index 0000000..f500c69 --- /dev/null +++ b/syncbot/helpers/refresh.py @@ -0,0 +1,97 @@ +"""Shared helpers for Refresh-button flows (Home tab and User Mapping). + +Provides a single place for cooldown message text, block injection, +and the hash/cache/cooldown check so both handlers stay DRY. +""" + +import time +from typing import Literal + +import constants +from helpers._cache import _cache_get, _cache_set + +_REFRESH_COOLDOWN_SECONDS = getattr(constants, "REFRESH_COOLDOWN_SECONDS", 60) + + +def cooldown_message_block(remaining_seconds: int) -> dict: + """Return a Block Kit context block dict for the refresh cooldown message.""" + text = ( + f"No new data. 
Wait {remaining_seconds} second{'s' if remaining_seconds != 1 else ''} " + "before refreshing again." + ) + return { + "type": "context", + "elements": [{"type": "mrkdwn", "text": text}], + } + + +def index_of_block_with_action(block_dicts: list, action_id: str) -> int: + """Return the index of the first block that contains an element with the given action_id. + + Used to find the Refresh button block so the cooldown message can be inserted after it. + Returns len(block_dicts) - 1 if not found (inject at end). + """ + for i, block in enumerate(block_dicts): + if block.get("type") == "actions": + for elt in block.get("elements") or []: + if elt.get("action_id") == action_id: + return i + return max(0, len(block_dicts) - 1) + + +def inject_cooldown_message( + cached_blocks: list, + after_block_index: int, + remaining_seconds: int, +) -> list: + """Insert the cooldown message block after the given block index. Does not mutate cached_blocks.""" + msg_block = cooldown_message_block(remaining_seconds) + i = after_block_index + 1 + return cached_blocks[:i] + [msg_block] + cached_blocks[i:] + + +def refresh_cooldown_check( + current_hash: str, + hash_key: str, + blocks_key: str, + refresh_at_key: str, + cooldown_seconds: int | None = None, +) -> tuple[Literal["cooldown", "cached", "full"], list | None, int | None]: + """Check whether we can short-circuit based on hash and cooldown. + + Returns: + ("cooldown", cached_blocks, remaining_seconds) when hash matches and within cooldown. + ("cached", cached_blocks, None) when hash matches and past cooldown. + ("full", None, None) when hash differs or no cached blocks. 
+ """ + cooldown_sec = cooldown_seconds if cooldown_seconds is not None else _REFRESH_COOLDOWN_SECONDS + + cached_hash = _cache_get(hash_key) + cached_blocks = _cache_get(blocks_key) + last_refresh_at = _cache_get(refresh_at_key) + now = time.monotonic() + + if current_hash != cached_hash or cached_blocks is None: + return ("full", None, None) + + if last_refresh_at is not None and (now - last_refresh_at) < cooldown_sec: + remaining = max(0, int(cooldown_sec - (now - last_refresh_at))) + return ("cooldown", cached_blocks, remaining) + + return ("cached", cached_blocks, None) + + +def refresh_after_full( + hash_key: str, + blocks_key: str, + refresh_at_key: str, + current_hash: str, + block_dicts: list, + cooldown_seconds: int | None = None, +) -> None: + """Store hash, blocks, and refresh timestamp after a full refresh.""" + cooldown_sec = cooldown_seconds if cooldown_seconds is not None else _REFRESH_COOLDOWN_SECONDS + + _cache_set(hash_key, current_hash, ttl=3600) + _cache_set(blocks_key, block_dicts, ttl=3600) + _cache_set(refresh_at_key, time.monotonic(), ttl=cooldown_sec * 2) diff --git a/syncbot/helpers/slack_api.py b/syncbot/helpers/slack_api.py new file mode 100644 index 0000000..072e5df --- /dev/null +++ b/syncbot/helpers/slack_api.py @@ -0,0 +1,230 @@ +"""Slack API wrappers with automatic retry and rate-limit handling.""" + +import json +import logging +import time as _time +from functools import wraps + +from slack_sdk import WebClient +from slack_sdk.errors import SlackApiError + +from db import DbManager, schemas +from helpers._cache import _USER_INFO_CACHE_TTL, _cache_get, _cache_set +from helpers.core import safe_get + +_logger = logging.getLogger(__name__) + +_SLACK_MAX_RETRIES = 3 +_SLACK_INITIAL_BACKOFF = 1.0 # seconds + + +def slack_retry(fn): + """Decorator that retries Slack API calls on rate-limit and server errors.""" + + @wraps(fn) + def wrapper(*args, **kwargs): + last_exc: Exception | None = None + backoff = _SLACK_INITIAL_BACKOFF + + for 
attempt in range(_SLACK_MAX_RETRIES + 1): + try: + return fn(*args, **kwargs) + except SlackApiError as exc: + last_exc = exc + status = exc.response.status_code if exc.response else 0 + + if status == 429: + retry_after = float(exc.response.headers.get("Retry-After", backoff)) + _logger.warning(f"{fn.__name__} rate-limited (attempt {attempt + 1}), sleeping {retry_after:.1f}s") + _time.sleep(retry_after) + backoff = min(backoff * 2, 30) + elif 500 <= status < 600: + _logger.warning( + f"{fn.__name__} server error {status} (attempt {attempt + 1}), retrying in {backoff:.1f}s" + ) + _time.sleep(backoff) + backoff = min(backoff * 2, 30) + else: + raise + raise last_exc + + return wrapper + + +@slack_retry +def _users_info(client: WebClient, user_id: str) -> dict: + """Low-level wrapper so the retry decorator can catch SlackApiError.""" + return client.users_info(user=user_id) + + +def _get_auth_info(client: WebClient) -> dict | None: + """Call ``auth.test`` once and cache both bot_id and user_id.""" + cache_key = "own_auth_info" + cached = _cache_get(cache_key) + if cached is not None: + return cached + try: + res = client.auth_test() + info = {"bot_id": safe_get(res, "bot_id"), "user_id": safe_get(res, "user_id")} + _cache_set(cache_key, info, ttl=3600) + return info + except Exception: + _logger.warning("Could not determine own identity via auth.test") + return None + + +def get_own_bot_id(client: WebClient, context: dict) -> str | None: + """Return SyncBot's own ``bot_id`` for the current workspace.""" + bot_id = context.get("bot_id") + if bot_id: + return bot_id + info = _get_auth_info(client) + return info["bot_id"] if info else None + + +def get_own_bot_user_id(client: WebClient) -> str | None: + """Return SyncBot's own *user* ID (``U…``) for the current workspace.""" + info = _get_auth_info(client) + return info["user_id"] if info else None + + +def get_bot_info_from_event(body: dict) -> tuple[str | None, str | None]: + """Extract display name and icon URL from 
a bot_message event.""" + event = body.get("event", {}) + bot_name = event.get("username") or "Bot" + icons = event.get("icons") or {} + icon_url = icons.get("image_48") or icons.get("image_36") or icons.get("image_72") + return bot_name, icon_url + + +def get_user_info(client: WebClient, user_id: str) -> tuple[str | None, str | None]: + """Return (display_name, profile_image_url) for a Slack user.""" + cache_key = f"user_info:{user_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + try: + res = _users_info(client, user_id) + except SlackApiError as exc: + _logger.debug(f"get_user_info: failed to look up user {user_id}: {exc}") + return None, None + + user_name = ( + safe_get(res, "user", "profile", "display_name") or safe_get(res, "user", "profile", "real_name") or None + ) + user_profile_url = safe_get(res, "user", "profile", "image_192") + + result = (user_name, user_profile_url) + _cache_set(cache_key, result, ttl=_USER_INFO_CACHE_TTL) + return result + + +@slack_retry +def post_message( + bot_token: str, + channel_id: str, + msg_text: str, + user_name: str | None = None, + user_profile_url: str | None = None, + thread_ts: str | None = None, + update_ts: str | None = None, + workspace_name: str | None = None, + blocks: list[dict] | None = None, +) -> dict: + """Post or update a message in a Slack channel.""" + slack_client = WebClient(bot_token) + posted_from = f"({workspace_name})" if workspace_name else "(via SyncBot)" + if blocks: + if msg_text.strip(): + msg_block = {"type": "section", "text": {"type": "mrkdwn", "text": msg_text}} + all_blocks = [msg_block] + blocks + else: + all_blocks = blocks + else: + all_blocks = [] + fallback_text = msg_text if msg_text.strip() else "Shared an image" + if update_ts: + res = slack_client.chat_update( + channel=channel_id, + text=fallback_text, + ts=update_ts, + blocks=all_blocks, + ) + else: + res = slack_client.chat_postMessage( + channel=channel_id, + text=fallback_text, + 
username=f"{user_name} {posted_from}", + icon_url=user_profile_url, + thread_ts=thread_ts, + blocks=all_blocks, + ) + return res + + +def get_post_records(thread_ts: str) -> list[tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Workspace]]: + """Look up all PostMeta records that share the same ``post_id``.""" + post = DbManager.find_records(schemas.PostMeta, [schemas.PostMeta.ts == float(thread_ts)]) + if post: + post_records = DbManager.find_join_records3( + left_cls=schemas.PostMeta, + right_cls1=schemas.SyncChannel, + right_cls2=schemas.Workspace, + filters=[ + schemas.PostMeta.post_id == post[0].post_id, + schemas.SyncChannel.status == "active", + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + else: + post_records = [] + + post_records.sort(key=lambda row: row[0].id) + + seen: set[tuple[int, str]] = set() + deduped: list[tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Workspace]] = [] + for pm, sc, ws in post_records: + key = (ws.id, sc.channel_id) + if key not in seen: + seen.add(key) + deduped.append((pm, sc, ws)) + return deduped + + +@slack_retry +def delete_message(bot_token: str, channel_id: str, ts: str) -> dict: + """Delete a message from a Slack channel.""" + slack_client = WebClient(bot_token) + res = slack_client.chat_delete( + channel=channel_id, + ts=ts, + ) + return res + + +def update_modal( + blocks: list[dict], + client: WebClient, + view_id: str, + title_text: str, + callback_id: str, + submit_button_text: str = "Submit", + parent_metadata: dict | None = None, + close_button_text: str = "Close", + notify_on_close: bool = False, +) -> None: + """Replace the contents of an existing Slack modal.""" + view = { + "type": "modal", + "callback_id": callback_id, + "title": {"type": "plain_text", "text": title_text}, + "submit": {"type": "plain_text", "text": submit_button_text}, + "close": {"type": "plain_text", "text": close_button_text}, + "notify_on_close": notify_on_close, + "blocks": blocks, + } + if parent_metadata: + 
view["private_metadata"] = json.dumps(parent_metadata) + + client.views_update(view_id=view_id, view=view) diff --git a/syncbot/helpers/user_matching.py b/syncbot/helpers/user_matching.py new file mode 100644 index 0000000..e6661a6 --- /dev/null +++ b/syncbot/helpers/user_matching.py @@ -0,0 +1,725 @@ +"""Cross-workspace user matching and mention resolution.""" + +import logging +import re +from datetime import UTC, datetime +from typing import Any + +from slack_sdk import WebClient +from slack_sdk.errors import SlackApiError + +import constants +from db import DbManager, schemas +from helpers._cache import _CACHE, _USER_INFO_CACHE_TTL, _cache_get, _cache_set +from helpers.core import safe_get +from helpers.encryption import decrypt_bot_token +from helpers.slack_api import _users_info, get_user_info, slack_retry +from helpers.workspace import ( + get_workspace_by_id, + resolve_workspace_name, +) + +_logger = logging.getLogger(__name__) + + +def _get_user_profile(client: WebClient, user_id: str) -> dict[str, Any] | None: + """Fetch a single user's profile with caching and retry.""" + cache_key = f"user_profile:{user_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + try: + res = _users_info(client, user_id) + except SlackApiError as exc: + _logger.warning(f"Failed to look up user {user_id}: {exc}") + return None + + profile = safe_get(res, "user", "profile") or {} + user_name = profile.get("display_name") or profile.get("real_name") or user_id + email = profile.get("email") + + result: dict[str, Any] = {"user_name": user_name, "email": email} + _cache_set(cache_key, result, ttl=_USER_INFO_CACHE_TTL) + return result + + +def _normalize_name(display_name: str) -> str: + """Trim trailing title/qualifier from a display name (e.g. 
drop text in parens or after dash).""" + name = re.split(r"\s+[\(\-]", display_name or "")[0] + return name.strip() + + +def normalize_display_name(name: str | None) -> str: + """Return display name with trailing paren/dash qualifiers stripped; fallback to original if empty.""" + if not name: + return name or "" + n = _normalize_name(name) + return n if n else name + + +def _match_ttl(method: str) -> int: + """Return the TTL in seconds for a given match method.""" + if method == "manual": + return 0 + if method == "email": + return constants.MATCH_TTL_EMAIL + if method == "name": + return constants.MATCH_TTL_NAME + return constants.MATCH_TTL_NONE + + +def _is_mapping_fresh(mapping: schemas.UserMapping) -> bool: + """Return True if a cached mapping is still within its TTL.""" + if mapping.match_method == "manual": + return True + ttl = _match_ttl(mapping.match_method) + age = (datetime.now(UTC) - mapping.matched_at.replace(tzinfo=UTC)).total_seconds() + return age < ttl + + +@slack_retry +def _users_list_page(client: WebClient, cursor: str = "") -> dict: + """Fetch one page of users.list (with retry on rate-limit).""" + return client.users_list(limit=200, cursor=cursor) + + +def _refresh_user_directory(client: WebClient, workspace_id: int) -> None: + """Crawl users.list for a workspace and upsert into user_directory. + + Active users are upserted normally. Deactivated users + (``member["deleted"] == True``) are soft-deleted via + ``_upsert_single_user_to_directory``. Users that were previously + in the directory but no longer appear in ``users.list`` at all are + hard-deleted along with their mappings. 
+ """ + cache_key = f"dir_refresh:{workspace_id}" + if _cache_get(cache_key): + return + + _logger.info("user_directory_refresh_start", extra={"workspace_id": workspace_id}) + cursor = "" + count = 0 + seen_user_ids: set[str] = set() + + while True: + res = _users_list_page(client, cursor=cursor) + members = safe_get(res, "members") or [] + + for member in members: + if member.get("is_bot") or member.get("id") == "USLACKBOT": + continue + seen_user_ids.add(member["id"]) + _upsert_single_user_to_directory(member, workspace_id) + count += 1 + + cursor = safe_get(res, "response_metadata", "next_cursor") or "" + if not cursor: + break + + if seen_user_ids: + all_entries = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == workspace_id], + ) + for entry in all_entries: + if entry.slack_user_id not in seen_user_ids: + _purge_mappings_for_user(entry.slack_user_id, workspace_id) + DbManager.delete_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == entry.id], + ) + + _logger.info("user_directory_refresh_done", extra={"workspace_id": workspace_id, "count": count}) + _cache_set(cache_key, True, ttl=constants.USER_DIR_REFRESH_TTL) + + +def _upsert_single_user_to_directory(member: dict, workspace_id: int) -> None: + """Insert or update a single user in the directory and propagate name changes. + + If the user is deactivated (``member["deleted"] == True``), their + directory entry is soft-deleted and all associated user mappings are + removed. 
+ """ + profile = member.get("profile", {}) + display_name = profile.get("display_name") or "" + real_name = profile.get("real_name") or "" + email = profile.get("email") + now = datetime.now(UTC) + current_name = display_name or real_name + is_deleted = member.get("deleted", False) + + existing = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == workspace_id, + schemas.UserDirectory.slack_user_id == member["id"], + ], + ) + + if is_deleted: + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + {schemas.UserDirectory.deleted_at: now, schemas.UserDirectory.updated_at: now}, + ) + _purge_mappings_for_user(member["id"], workspace_id) + _CACHE.pop(f"user_info:{member['id']}", None) + return + + if existing: + DbManager.update_records( + schemas.UserDirectory, + [schemas.UserDirectory.id == existing[0].id], + { + schemas.UserDirectory.email: email, + schemas.UserDirectory.real_name: real_name, + schemas.UserDirectory.display_name: display_name, + schemas.UserDirectory.normalized_name: _normalize_name(display_name) + if display_name + else _normalize_name(real_name), + schemas.UserDirectory.updated_at: now, + schemas.UserDirectory.deleted_at: None, + }, + ) + else: + DbManager.create_record( + schemas.UserDirectory( + workspace_id=workspace_id, + slack_user_id=member["id"], + email=email, + real_name=real_name, + display_name=display_name, + normalized_name=_normalize_name(display_name) if display_name else _normalize_name(real_name), + updated_at=now, + ) + ) + + if current_name: + mappings = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == workspace_id, + schemas.UserMapping.source_user_id == member["id"], + ], + ) + for m in mappings: + if m.source_display_name != current_name: + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == m.id], + {schemas.UserMapping.source_display_name: 
current_name}, + ) + + _CACHE.pop(f"user_info:{member['id']}", None) + + +def _purge_mappings_for_user(slack_user_id: str, workspace_id: int) -> None: + """Hard-delete all user mappings where this user is source or target.""" + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == workspace_id, + schemas.UserMapping.source_user_id == slack_user_id, + ], + ) + DbManager.delete_records( + schemas.UserMapping, + [ + schemas.UserMapping.target_workspace_id == workspace_id, + schemas.UserMapping.target_user_id == slack_user_id, + ], + ) + + +@slack_retry +def _lookup_user_by_email(client: WebClient, email: str) -> str | None: + """Resolve a user ID from an email address in the target workspace.""" + res = client.users_lookupByEmail(email=email) + return safe_get(res, "user", "id") + + +def _find_user_match( + source_user_id: str, + source_profile: dict[str, Any], + target_client: WebClient, + target_workspace_id: int, +) -> tuple[str | None, str]: + """Run the matching algorithm for one source user against one target workspace.""" + email = source_profile.get("email") + + if email: + try: + target_uid = _lookup_user_by_email(target_client, email) + if target_uid: + return target_uid, "email" + except SlackApiError as exc: + _logger.debug(f"match_user: email lookup failed for {email}: {exc}") + + _refresh_user_directory(target_client, target_workspace_id) + + source_real = source_profile.get("real_name", "") + source_display = source_profile.get("display_name", "") + source_normalized = _normalize_name(source_display) if source_display else _normalize_name(source_real) + + if not source_normalized: + return None, "none" + + candidates = DbManager.find_records( + schemas.UserDirectory, + [ + schemas.UserDirectory.workspace_id == target_workspace_id, + schemas.UserDirectory.deleted_at.is_(None), + ], + ) + + name_matches = [ + c + for c in candidates + if c.normalized_name + and c.normalized_name.lower() == 
source_normalized.lower() + and c.real_name + and source_real + and c.real_name.lower() == source_real.lower() + ] + if len(name_matches) == 1: + return name_matches[0].slack_user_id, "name" + + if source_real: + real_only = [c for c in candidates if c.real_name and c.real_name.lower() == source_real.lower()] + if len(real_only) == 1: + return real_only[0].slack_user_id, "name" + + return None, "none" + + +def _get_source_profile_full(client: WebClient, user_id: str) -> dict[str, Any] | None: + """Fetch full profile fields needed for matching.""" + cache_key = f"user_profile_full:{user_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + try: + res = _users_info(client, user_id) + except SlackApiError as exc: + _logger.warning(f"Failed to look up user {user_id}: {exc}") + return None + + profile = safe_get(res, "user", "profile") or {} + result: dict[str, Any] = { + "display_name": profile.get("display_name") or "", + "real_name": profile.get("real_name") or "", + "email": profile.get("email"), + } + _cache_set(cache_key, result, ttl=_USER_INFO_CACHE_TTL) + return result + + +def get_mapped_target_user_id( + source_user_id: str, + source_workspace_id: int, + target_workspace_id: int, +) -> str | None: + """Return the mapped target user ID, or *None* if unmapped.""" + mappings = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == source_workspace_id, + schemas.UserMapping.source_user_id == source_user_id, + schemas.UserMapping.target_workspace_id == target_workspace_id, + schemas.UserMapping.target_user_id.isnot(None), + schemas.UserMapping.match_method != "none", + ], + ) + return mappings[0].target_user_id if mappings else None + + +def get_display_name_and_icon_for_synced_message( + source_user_id: str, + source_workspace_id: int, + source_display_name: str | None, + source_icon_url: str | None, + target_client: WebClient, + target_workspace_id: int, +) -> tuple[str | None, str | None]: + 
"""Return (display_name, icon_url) to use when syncing a message into the target workspace. + + If the source user is mapped to a user in the target workspace, returns that + local user's display name and profile image so the synced message appears + under the name familiar to users in the target workspace. Otherwise + returns the source display name and icon. Display names are normalized + (text in parens or after a dash at the end is dropped); the app then + appends the remote workspace name in parens when posting. + """ + mapped_id = get_mapped_target_user_id(source_user_id, source_workspace_id, target_workspace_id) + if mapped_id: + local_name, local_icon = get_user_info(target_client, mapped_id) + if local_name: + return normalize_display_name(local_name), local_icon or source_icon_url + return normalize_display_name(source_display_name), source_icon_url + + +def resolve_mention_for_workspace( + source_client: WebClient, + source_user_id: str, + source_workspace_id: int, + target_client: WebClient, + target_workspace_id: int, +) -> str: + """Resolve a single @mention from source workspace to target workspace.""" + source_ws = get_workspace_by_id(source_workspace_id) + source_ws_name = resolve_workspace_name(source_ws) if source_ws else None + + def _unmapped_label(name: str) -> str: + if source_ws_name: + return f"`[@{name} ({source_ws_name})]`" + return f"`[@{name}]`" + + mappings = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == source_workspace_id, + schemas.UserMapping.source_user_id == source_user_id, + schemas.UserMapping.target_workspace_id == target_workspace_id, + ], + ) + + if mappings and _is_mapping_fresh(mappings[0]): + mapping = mappings[0] + if mapping.target_user_id: + return f"<@{mapping.target_user_id}>" + return _unmapped_label(mapping.source_display_name or source_user_id) + + source_profile = _get_source_profile_full(source_client, source_user_id) + if not source_profile: + return 
_unmapped_label(source_user_id) + + target_uid, method = _find_user_match(source_user_id, source_profile, target_client, target_workspace_id) + + display = source_profile.get("display_name") or source_profile.get("real_name") or source_user_id + now = datetime.now(UTC) + + if mappings: + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == mappings[0].id], + { + schemas.UserMapping.target_user_id: target_uid, + schemas.UserMapping.match_method: method, + schemas.UserMapping.source_display_name: display, + schemas.UserMapping.matched_at: now, + }, + ) + else: + DbManager.create_record( + schemas.UserMapping( + source_workspace_id=source_workspace_id, + source_user_id=source_user_id, + target_workspace_id=target_workspace_id, + target_user_id=target_uid, + match_method=method, + source_display_name=display, + matched_at=now, + group_id=None, + ) + ) + + if target_uid: + return f"<@{target_uid}>" + return _unmapped_label(display) + + +_MAX_MENTIONS = 50 + + +def parse_mentioned_users(msg_text: str, client: WebClient) -> list[dict[str, Any]]: + """Extract mentioned user IDs from a message and resolve their profiles.""" + user_ids = re.findall(r"<@(\w+)>", msg_text or "")[:_MAX_MENTIONS] + if not user_ids: + return [] + + results: list[dict[str, Any]] = [] + for uid in user_ids: + profile = _get_user_profile(client, uid) + if profile: + results.append({"user_id": uid, **profile}) + else: + results.append({"user_id": uid, "user_name": uid, "email": None}) + return results + + +def apply_mentioned_users( + msg_text: str, + source_client: WebClient, + target_client: WebClient, + mentioned_user_info: list[dict[str, Any]], + source_workspace_id: int, + target_workspace_id: int, +) -> str: + """Re-map @mentions from the source workspace to the target workspace.""" + msg_text = msg_text or "" + if not mentioned_user_info: + return msg_text + + replace_list: list[str] = [] + for user_info in mentioned_user_info: + uid = user_info.get("user_id", "") + 
try:
+            resolved = resolve_mention_for_workspace(
+                source_client=source_client,
+                source_user_id=uid,
+                source_workspace_id=source_workspace_id,
+                target_client=target_client,
+                target_workspace_id=target_workspace_id,
+            )
+            replace_list.append(resolved)
+        except Exception as exc:
+            _logger.error(f"Failed to resolve mention for user {uid}: {exc}")
+            fallback = user_info.get("user_name") or uid
+            source_ws = get_workspace_by_id(source_workspace_id) if source_workspace_id else None
+            ws_label = resolve_workspace_name(source_ws) if source_ws else None
+            if ws_label:
+                replace_list.append(f"`[@{fallback} ({ws_label})]`")
+            else:
+                replace_list.append(f"`[@{fallback}]`")
+
+    replace_iter = iter(replace_list)
+    return re.sub(r"<@\w+>", lambda _: next(replace_iter), msg_text)
+
+
+def find_synced_channel_in_target(source_channel_id: str, target_workspace_id: int) -> str | None:
+    """If *source_channel_id* belongs to an active sync that *target_workspace_id* also has a channel in, return the local channel ID."""
+    source_rows = DbManager.find_records(
+        schemas.SyncChannel,
+        [
+            schemas.SyncChannel.channel_id == source_channel_id,
+            schemas.SyncChannel.deleted_at.is_(None),
+            schemas.SyncChannel.status == "active",
+        ],
+    )
+    if not source_rows:
+        return None
+    sync_id = source_rows[0].sync_id
+    target_rows = DbManager.find_records(
+        schemas.SyncChannel,
+        [
+            schemas.SyncChannel.sync_id == sync_id,
+            schemas.SyncChannel.workspace_id == target_workspace_id,
+            schemas.SyncChannel.deleted_at.is_(None),
+            schemas.SyncChannel.status == "active",
+        ],
+    )
+    if not target_rows:
+        return None
+    return target_rows[0].channel_id
+
+
+_ARCHIVE_LINK_PATTERN = re.compile(
+    r"<([^|>]*/archives/(C[A-Z0-9]+)[^|>]*)\|([^>]+)>"
+)
+
+
+def _rewrite_slack_archive_links_to_native_channels(msg_text: str, target_workspace_id: int) -> str:
+    """Replace Slack archive mrkdwn links with native ``<#C_LOCAL>`` when that channel is synced to *target_workspace_id*."""
+    if not msg_text or not target_workspace_id:
+        return msg_text
+
+    def 
repl(m: re.Match) -> str: + ch_id = m.group(2) + local = find_synced_channel_in_target(ch_id, target_workspace_id) + if local: + return f"<#{local}>" + return m.group(0) + + return _ARCHIVE_LINK_PATTERN.sub(repl, msg_text) + + +def _get_workspace_domain(client: WebClient, team_id: str) -> str | None: + """Return the workspace subdomain (e.g. ``acme`` for ``acme.slack.com``) from ``team.info``, cached.""" + cache_key = f"ws_domain:{team_id}" + cached = _cache_get(cache_key) + if cached: + return cached + + try: + info = client.team_info() + domain = safe_get(info, "team", "domain") + if domain: + _cache_set(cache_key, domain, ttl=86400) + return domain + except Exception as exc: + _logger.debug("get_workspace_domain_failed", extra={"team_id": team_id, "error": str(exc)}) + return None + + +def resolve_channel_references( + msg_text: str, + source_client: WebClient | None, + source_workspace: "schemas.Workspace | None" = None, + target_workspace_id: int | None = None, +) -> str: + """Replace ``<#CHANNEL_ID>`` references with native local channels when synced, else archive URLs or fallbacks. + + When *target_workspace_id* is set, Slack archive links from federated senders may be rewritten to + ``<#C_LOCAL>`` if that source channel is synced to the target workspace. 
+ """ + if not msg_text: + return msg_text + + if target_workspace_id: + msg_text = _rewrite_slack_archive_links_to_native_channels(msg_text, target_workspace_id) + + channel_pattern = re.compile(r"<#(C[A-Z0-9]+)(?:\|([^>]*))?>") + pair_tuples = channel_pattern.findall(msg_text) + if not pair_tuples: + return msg_text + + by_channel_id: dict[str, str | None] = {} + for cid, pipe in pair_tuples: + if cid not in by_channel_id: + by_channel_id[cid] = pipe.strip() if pipe and pipe.strip() else None + + team_id = getattr(source_workspace, "team_id", None) if source_workspace else None + ws_name = resolve_workspace_name(source_workspace) if source_workspace else None + + for ch_id, inline_label in by_channel_id.items(): + if target_workspace_id: + local_ch = find_synced_channel_in_target(ch_id, target_workspace_id) + if local_ch: + replacement = f"<#{local_ch}>" + msg_text = channel_pattern.sub( + lambda m, _cid=ch_id, _rep=replacement: _rep if m.group(1) == _cid else m.group(0), + msg_text, + ) + continue + + ch_name = ch_id + if source_client: + try: + info = source_client.conversations_info(channel=ch_id) + ch_name = safe_get(info, "channel", "name") or ch_id + except Exception as exc: + _logger.debug( + "resolve_channel_reference_failed", + extra={"channel_id": ch_id, "error": str(exc)}, + ) + if inline_label: + ch_name = inline_label + elif inline_label: + ch_name = inline_label + + if ch_name != ch_id: + label = f"#{ch_name} ({ws_name})" if ws_name else f"#{ch_name}" + domain = _get_workspace_domain(source_client, team_id) if source_client and team_id else None + if domain: + deep_link = f"https://{domain}.slack.com/archives/{ch_id}" + replacement = f"<{deep_link}|{label}>" + else: + replacement = f"`[{label}]`" + else: + replacement = f"#{ch_id}" + + msg_text = channel_pattern.sub( + lambda m, _cid=ch_id, _rep=replacement: _rep if m.group(1) == _cid else m.group(0), + msg_text, + ) + + return msg_text + + +def seed_user_mappings(source_workspace_id: int, 
target_workspace_id: int, group_id: int | None = None) -> int: + """Create stub UserMapping records for all active users in the source directory.""" + directory = DbManager.find_records( + schemas.UserDirectory, + [schemas.UserDirectory.workspace_id == source_workspace_id, schemas.UserDirectory.deleted_at.is_(None)], + ) + + existing = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.source_workspace_id == source_workspace_id, + schemas.UserMapping.target_workspace_id == target_workspace_id, + ], + ) + existing_by_uid = {m.source_user_id: m for m in existing} + + now = datetime.now(UTC) + created = 0 + for entry in directory: + current_name = entry.display_name or entry.real_name + if entry.slack_user_id in existing_by_uid: + mapping = existing_by_uid[entry.slack_user_id] + if mapping.source_display_name != current_name: + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == mapping.id], + {schemas.UserMapping.source_display_name: current_name}, + ) + continue + DbManager.create_record( + schemas.UserMapping( + source_workspace_id=source_workspace_id, + source_user_id=entry.slack_user_id, + target_workspace_id=target_workspace_id, + target_user_id=None, + match_method="none", + source_display_name=current_name, + matched_at=now, + group_id=group_id, + ) + ) + created += 1 + + return created + + +def run_auto_match_for_workspace(target_client: WebClient, target_workspace_id: int) -> tuple[int, int]: + """Re-run auto-matching for all unmatched mappings targeting a workspace.""" + unmatched = DbManager.find_records( + schemas.UserMapping, + [ + schemas.UserMapping.target_workspace_id == target_workspace_id, + schemas.UserMapping.match_method == "none", + ], + ) + + _refresh_user_directory(target_client, target_workspace_id) + + newly_matched = 0 + still_unmatched = 0 + + for mapping in unmatched: + source_workspace = get_workspace_by_id(mapping.source_workspace_id) + if not source_workspace: + still_unmatched += 1 + 
continue + + source_client = WebClient(token=decrypt_bot_token(source_workspace.bot_token)) + source_profile = _get_source_profile_full(source_client, mapping.source_user_id) + if not source_profile: + still_unmatched += 1 + continue + + target_uid, method = _find_user_match( + mapping.source_user_id, source_profile, target_client, target_workspace_id + ) + + if target_uid: + display = source_profile.get("display_name") or source_profile.get("real_name") or mapping.source_user_id + DbManager.update_records( + schemas.UserMapping, + [schemas.UserMapping.id == mapping.id], + { + schemas.UserMapping.target_user_id: target_uid, + schemas.UserMapping.match_method: method, + schemas.UserMapping.source_display_name: display, + schemas.UserMapping.matched_at: datetime.now(UTC), + }, + ) + newly_matched += 1 + else: + still_unmatched += 1 + + return newly_matched, still_unmatched diff --git a/syncbot/helpers/workspace.py b/syncbot/helpers/workspace.py new file mode 100644 index 0000000..f64a234 --- /dev/null +++ b/syncbot/helpers/workspace.py @@ -0,0 +1,405 @@ +"""Workspace record management and name resolution.""" + +import logging + +from slack_sdk import WebClient + +from db import DbManager, schemas +from helpers._cache import _cache_get, _cache_set +from helpers.core import safe_get +from helpers.encryption import decrypt_bot_token, encrypt_bot_token + +_logger = logging.getLogger(__name__) + + +def get_sync_list(team_id: str, channel_id: str) -> list[tuple[schemas.SyncChannel, schemas.Workspace]]: + """Return every (SyncChannel, Workspace) pair that shares a sync with *channel_id*.""" + cache_key = f"sync_list:{channel_id}" + cached = _cache_get(cache_key) + if cached is not None: + return cached + + sync_channel_record = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.channel_id == channel_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + if sync_channel_record: + sync_channels = 
DbManager.find_join_records2( + left_cls=schemas.SyncChannel, + right_cls=schemas.Workspace, + filters=[ + schemas.SyncChannel.sync_id == sync_channel_record[0].sync_id, + schemas.SyncChannel.deleted_at.is_(None), + schemas.SyncChannel.status == "active", + ], + ) + else: + sync_channels = [] + + # One logical target per (workspace, Slack channel): duplicate SyncChannel rows + # (e.g. double-submit on join/subscribe) would otherwise post the same message N times. + seen: set[tuple[int, str]] = set() + deduped: list[tuple[schemas.SyncChannel, schemas.Workspace]] = [] + for sc, ws in sync_channels: + key = (ws.id, sc.channel_id) + if key not in seen: + seen.add(key) + deduped.append((sc, ws)) + sync_channels = deduped + + _cache_set(cache_key, sync_channels) + return sync_channels + + +def get_federated_workspace(group_id: int, workspace_id: int) -> schemas.FederatedWorkspace | None: + """Return the federated workspace for a group membership, if one exists.""" + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.workspace_id == workspace_id, + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + if not members or not members[0].federated_workspace_id: + return None + + fed_ws = DbManager.get_record(schemas.FederatedWorkspace, id=members[0].federated_workspace_id) + if not fed_ws or fed_ws.status != "active": + return None + + return fed_ws + + +def get_federated_workspace_for_sync(sync_id: int) -> schemas.FederatedWorkspace | None: + """Return the federated workspace for a sync, checking group membership.""" + sync = DbManager.get_record(schemas.Sync, id=sync_id) + if not sync or not sync.group_id: + return None + + fed_members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == sync.group_id, + schemas.WorkspaceGroupMember.federated_workspace_id.isnot(None), + 
schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + if not fed_members: + return None + + fed_ws = DbManager.get_record(schemas.FederatedWorkspace, id=fed_members[0].federated_workspace_id) + if not fed_ws or fed_ws.status != "active": + return None + + return fed_ws + + +def get_workspace_record(team_id: str, body: dict, context: dict, client: WebClient) -> schemas.Workspace: + """Fetch or create the Workspace record for a Slack workspace.""" + workspace_record: schemas.Workspace = DbManager.get_record(schemas.Workspace, id=team_id) + team_domain = safe_get(body, "team", "domain") + + if not workspace_record: + try: + team_info = client.team_info() + ws_name = team_info["team"]["name"] + except Exception as exc: + _logger.debug(f"get_workspace: team_info failed, falling back to domain: {exc}") + ws_name = team_domain + workspace_record: schemas.Workspace = DbManager.create_record( + schemas.Workspace( + team_id=team_id, + workspace_name=ws_name, + bot_token=encrypt_bot_token(context["bot_token"]), + ) + ) + elif workspace_record.deleted_at is not None: + workspace_record = _restore_workspace(workspace_record, context, client) + else: + _maybe_refresh_bot_token(workspace_record, context) + _maybe_refresh_workspace_name(workspace_record, client) + + return workspace_record + + +def _maybe_refresh_bot_token(workspace_record: schemas.Workspace, context: dict) -> None: + """Update the stored bot token if the OAuth flow provided a newer one.""" + new_token = safe_get(context, "bot_token") + if not new_token: + return + + encrypted_new = encrypt_bot_token(new_token) + if encrypted_new != workspace_record.bot_token: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + {schemas.Workspace.bot_token: encrypted_new}, + ) + workspace_record.bot_token = encrypted_new + _logger.info( + "bot_token_refreshed", + extra={"workspace_id": workspace_record.id, "team_id": 
workspace_record.team_id}, + ) + + +def _maybe_refresh_workspace_name(workspace_record: schemas.Workspace, client: WebClient) -> None: + """Refresh the stored workspace name from the Slack API (at most once per day).""" + cache_key = f"ws_name_refresh:{workspace_record.id}" + if _cache_get(cache_key): + return + + _cache_set(cache_key, True, ttl=86400) + + try: + team_info = client.team_info() + current_name = team_info["team"]["name"] + except Exception as exc: + _logger.debug(f"_maybe_refresh_workspace_name: team_info call failed: {exc}") + return + + if current_name and current_name != workspace_record.workspace_name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + {schemas.Workspace.workspace_name: current_name}, + ) + workspace_record.workspace_name = current_name + _logger.info( + "workspace_name_refreshed", + extra={"workspace_id": workspace_record.id, "new_name": current_name}, + ) + + +def _restore_workspace( + workspace_record: schemas.Workspace, + context: dict, + client: WebClient, +) -> schemas.Workspace: + """Restore a soft-deleted workspace and notify group members.""" + from helpers.notifications import notify_admins_dm, notify_synced_channels + + ws_name = resolve_workspace_name(workspace_record) + + new_token = safe_get(context, "bot_token") + update_fields = {schemas.Workspace.deleted_at: None} + if new_token: + update_fields[schemas.Workspace.bot_token] = encrypt_bot_token(new_token) + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace_record.id], + update_fields, + ) + + workspace_record = DbManager.get_record(schemas.Workspace, id=workspace_record.team_id) + + soft_deleted_memberships = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_record.id, + schemas.WorkspaceGroupMember.deleted_at.isnot(None), + schemas.WorkspaceGroupMember.status == "active", + ], + ) + + restored_group_ids: set[int] = 
set() + for membership in soft_deleted_memberships: + group = DbManager.get_record(schemas.WorkspaceGroup, id=membership.group_id) + if not group or group.status != "active": + continue + + DbManager.update_records( + schemas.WorkspaceGroupMember, + [schemas.WorkspaceGroupMember.id == membership.id], + {schemas.WorkspaceGroupMember.deleted_at: None}, + ) + restored_group_ids.add(membership.group_id) + + if restored_group_ids: + my_soft_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.workspace_id == workspace_record.id, + schemas.SyncChannel.deleted_at.isnot(None), + ], + ) + for sync_channel in my_soft_channels: + sync = DbManager.get_record(schemas.Sync, id=sync_channel.sync_id) + if sync and sync.group_id in restored_group_ids: + DbManager.update_records( + schemas.SyncChannel, + [schemas.SyncChannel.id == sync_channel.id], + {schemas.SyncChannel.deleted_at: None, schemas.SyncChannel.status: "active"}, + ) + + notified_ws: set[int] = set() + for group_id in restored_group_ids: + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + schemas.WorkspaceGroupMember.workspace_id != workspace_record.id, + ], + ) + for m in members: + if not m.workspace_id or m.workspace_id in notified_ws: + continue + member_ws = get_workspace_by_id(m.workspace_id) + if not member_ws or not member_ws.bot_token or member_ws.deleted_at is not None: + continue + notified_ws.add(m.workspace_id) + try: + member_client = WebClient(token=decrypt_bot_token(member_ws.bot_token)) + notify_admins_dm( + member_client, + f":arrow_forward: *{ws_name}* has been restored. 
Group syncing will resume.", + ) + + syncs_in_group = DbManager.find_records( + schemas.Sync, [schemas.Sync.group_id == group_id], + ) + other_channel_ids = [] + for sync in syncs_in_group: + other_sync_channels = DbManager.find_records( + schemas.SyncChannel, + [ + schemas.SyncChannel.sync_id == sync.id, + schemas.SyncChannel.workspace_id == m.workspace_id, + schemas.SyncChannel.deleted_at.is_(None), + ], + ) + for sync_channel in other_sync_channels: + other_channel_ids.append(sync_channel.channel_id) + if other_channel_ids: + notify_synced_channels( + member_client, + other_channel_ids, + f":arrow_forward: Syncing with *{ws_name}* has been resumed.", + ) + except Exception as e: + _logger.warning(f"_restore_workspace: failed to notify member {m.workspace_id}: {e}") + + _logger.info( + "workspace_restored", + extra={ + "workspace_id": workspace_record.id, + "groups_restored": len(restored_group_ids), + }, + ) + + return workspace_record + + +def get_workspace_by_id(workspace_id: int, context: dict | None = None) -> schemas.Workspace | None: + """Look up a workspace by its integer primary-key ``id`` column. + + If *context* is provided, uses request-scoped cache to avoid repeated DB + lookups for the same workspace_id within one request. 
+ """ + if context is not None: + cache = context.setdefault("_workspace_by_id", {}) + if workspace_id in cache: + return cache[workspace_id] + rows = DbManager.find_records(schemas.Workspace, [schemas.Workspace.id == workspace_id]) + result = rows[0] if rows else None + if context is not None: + context.setdefault("_workspace_by_id", {})[workspace_id] = result + return result + + +def get_groups_for_workspace(workspace_id: int) -> list[schemas.WorkspaceGroup]: + """Return all active groups the workspace belongs to.""" + members = DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.workspace_id == workspace_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + groups: list[schemas.WorkspaceGroup] = [] + for m in members: + g = DbManager.get_record(schemas.WorkspaceGroup, id=m.group_id) + if g and g.status == "active": + groups.append(g) + return groups + + +def get_group_members(group_id: int) -> list[schemas.WorkspaceGroupMember]: + """Return all active members of a group.""" + return DbManager.find_records( + schemas.WorkspaceGroupMember, + [ + schemas.WorkspaceGroupMember.group_id == group_id, + schemas.WorkspaceGroupMember.status == "active", + schemas.WorkspaceGroupMember.deleted_at.is_(None), + ], + ) + + +def resolve_workspace_name(workspace: schemas.Workspace) -> str: + """Return a human-readable name for a workspace.""" + if workspace.workspace_name: + return workspace.workspace_name + + if workspace.bot_token: + try: + ws_client = WebClient(token=decrypt_bot_token(workspace.bot_token)) + team_info = ws_client.team_info() + name = safe_get(team_info, "team", "name") + if name: + DbManager.update_records( + schemas.Workspace, + [schemas.Workspace.id == workspace.id], + {schemas.Workspace.workspace_name: name}, + ) + workspace.workspace_name = name + return name + except Exception as exc: + # Name lookup is best-effort; falling back to team_id keeps UI 
usable + # even when Slack API calls fail intermittently. + _logger.debug( + "resolve_workspace_name_failed", + extra={"workspace_id": workspace.id, "team_id": workspace.team_id, "error": str(exc)}, + ) + + return workspace.team_id or f"Workspace {workspace.id}" + + +def resolve_channel_name(channel_id: str, workspace=None) -> str: + """Resolve a channel ID to a human-readable name.""" + if not channel_id: + return channel_id + + cache_key = f"chan_name:{channel_id}" + cached = _cache_get(cache_key) + if cached: + return cached + + ch_name = channel_id + ws_name = None + + if workspace and hasattr(workspace, "bot_token") and workspace.bot_token: + ws_name = getattr(workspace, "workspace_name", None) + try: + ws_client = WebClient(token=decrypt_bot_token(workspace.bot_token)) + info = ws_client.conversations_info(channel=channel_id) + ch_name = safe_get(info, "channel", "name") or channel_id + except Exception as exc: + _logger.debug(f"resolve_channel_name: conversations_info failed for {channel_id}: {exc}") + + if ws_name: + result = f"#{ch_name} ({ws_name})" + else: + result = f"#{ch_name}" + + if ch_name != channel_id: + _cache_set(cache_key, result, ttl=3600) + return result diff --git a/syncbot/logger.py b/syncbot/logger.py new file mode 100644 index 0000000..7cd116e --- /dev/null +++ b/syncbot/logger.py @@ -0,0 +1,261 @@ +"""Structured logging and observability helpers. + +Provides: + +* **Structured JSON formatter** — Every log entry is emitted as a single + JSON object with consistent fields (``timestamp``, ``level``, + ``correlation_id``, ``module``, ``message``). This makes CloudWatch + Logs Insights queries fast and reliable. +* **Correlation IDs** — A unique ``correlation_id`` is generated at the + start of each incoming Slack request and automatically included in + every log line emitted during that request. +* **Metrics helpers** — Lightweight functions that emit metric events as + structured log entries. 
CloudWatch Logs Insights or a metric filter + can aggregate these into numeric dashboards. + +Usage:: + + from logger import configure_logging, set_correlation_id, emit_metric + + configure_logging() # call once at module level + set_correlation_id() # call at the start of each request + emit_metric("messages_synced", 3, sync_id="abc") +""" + +import json +import logging +import time as _time +import uuid +from datetime import UTC +from typing import Any + +# --------------------------------------------------------------------------- +# Correlation-ID storage (thread-local not needed — Lambda is single-thread) +# --------------------------------------------------------------------------- + +_correlation_id: str | None = None +_request_start: float | None = None + + +def set_correlation_id(value: str | None = None) -> str: + """Set and return a correlation ID for the current request. + + If *value* is ``None`` a new UUID-4 is generated. Also resets the + internal request-start timer used by :func:`get_request_duration_ms`. + """ + global _correlation_id, _request_start + _correlation_id = value or uuid.uuid4().hex[:12] + _request_start = _time.monotonic() + return _correlation_id + + +def get_correlation_id() -> str: + """Return the current correlation ID, or ``"none"`` if unset.""" + return _correlation_id or "none" + + +def get_request_duration_ms() -> float: + """Milliseconds elapsed since :func:`set_correlation_id` was called.""" + if _request_start is None: + return 0.0 + return (_time.monotonic() - _request_start) * 1000 + + +# --------------------------------------------------------------------------- +# Structured JSON formatter +# --------------------------------------------------------------------------- + + +class StructuredFormatter(logging.Formatter): + """Emit each log record as a single-line JSON object. + + Fields included in every entry: + + * ``timestamp`` — ISO-8601 UTC + * ``level`` — e.g. 
INFO, WARNING, ERROR + * ``correlation_id`` — request-scoped ID set by :func:`set_correlation_id` + * ``module`` — Python module that emitted the log + * ``function`` — function name + * ``message`` — the formatted log message + + Extra keys passed via ``logging.info("msg", extra={...})`` are merged + into the top-level JSON object. + """ + + # Keys that belong to the stdlib LogRecord and should not be forwarded. + _RESERVED = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__.keys()) + + def format(self, record: logging.LogRecord) -> str: + entry: dict[str, Any] = { + "timestamp": self.formatTime(record, datefmt="%Y-%m-%dT%H:%M:%S.%fZ"), + "level": record.levelname, + "correlation_id": get_correlation_id(), + "module": record.module, + "function": record.funcName, + "message": record.getMessage(), + } + + if record.exc_info and record.exc_info[1]: + entry["exception"] = self.formatException(record.exc_info) + + # Merge any extra fields the caller passed. + for key, val in record.__dict__.items(): + if key not in self._RESERVED and key not in entry: + entry[key] = val + + return json.dumps(entry, default=str) + + def formatTime(self, record, datefmt=None): # noqa: N802 — override + from datetime import datetime + + dt = datetime.fromtimestamp(record.created, tz=UTC) + if datefmt: + return dt.strftime(datefmt) + return dt.isoformat() + + +class DevFormatter(logging.Formatter): + """Human-readable colorized formatter for local development. + + Outputs logs like:: + + 17:14:05 INFO [app.main_response] (9dab20ac) request_received + request_type=event_callback request_id=app_home_opened + + 17:14:06 ERROR [listener_error_handler.handle] (9dab20ac) Something broke + Traceback (most recent call last): + ... 
+ """ + + _RESERVED = frozenset(logging.LogRecord("", 0, "", 0, "", (), None).__dict__.keys()) + + _COLORS = { + "DEBUG": "\033[90m", # grey + "INFO": "\033[32m", # green + "WARNING": "\033[33m", # yellow + "ERROR": "\033[31m", # red + "CRITICAL": "\033[1;31m", # bold red + } + _RESET = "\033[0m" + _DIM = "\033[90m" + + def format(self, record: logging.LogRecord) -> str: + from datetime import datetime + + dt = datetime.fromtimestamp(record.created, tz=UTC) + time_str = dt.strftime("%H:%M:%S") + + color = self._COLORS.get(record.levelname, "") + level = f"{color}{record.levelname:<5}{self._RESET}" + + corr = get_correlation_id() + corr_str = f" {self._DIM}({corr}){self._RESET}" if corr != "none" else "" + + location = f"{record.module}.{record.funcName}" + msg = record.getMessage() + + line = f"{self._DIM}{time_str}{self._RESET} {level} [{location}]{corr_str} {msg}" + + extras = {} + for key, val in record.__dict__.items(): + if key not in self._RESERVED and key not in ("message", "correlation_id"): + extras[key] = val + + if extras: + pairs = " ".join(f"{k}={v}" for k, v in extras.items()) + line += f"\n{' ' * 15}{self._DIM}{pairs}{self._RESET}" + + if record.exc_info and record.exc_info[1]: + exc_text = self.formatException(record.exc_info) + indented = "\n".join(f"{' ' * 15}{line_}" for line_ in exc_text.splitlines()) + line += f"\n{indented}" + + return line + + +# --------------------------------------------------------------------------- +# One-time logging configuration +# --------------------------------------------------------------------------- + +_configured = False + + +def configure_logging(level: int = logging.INFO) -> None: + """Replace the root logger's handlers with a single structured-JSON handler. + + The effective level is determined by the ``LOG_LEVEL`` environment variable + (e.g. ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, ``CRITICAL``). If the + variable is unset or invalid the *level* parameter is used as a fallback. 
+ + Uses :class:`DevFormatter` (human-readable, colorized) when + ``LOCAL_DEVELOPMENT`` is enabled, otherwise :class:`StructuredFormatter` + (single-line JSON for CloudWatch). + + Safe to call multiple times — subsequent calls are no-ops. + """ + import os + + global _configured + if _configured: + return + _configured = True + + env_level = os.environ.get("LOG_LEVEL", "").strip().upper() + effective_level = getattr(logging, env_level, None) if env_level else None + if not isinstance(effective_level, int): + effective_level = level + + root = logging.getLogger() + root.setLevel(effective_level) + + # Remove any existing handlers (e.g. Slack Bolt's defaults). + for h in list(root.handlers): + root.removeHandler(h) + + local_dev = os.environ.get("LOCAL_DEVELOPMENT", "false").lower() == "true" + + handler = logging.StreamHandler() + handler.setFormatter(DevFormatter() if local_dev else StructuredFormatter()) + root.addHandler(handler) + + +# --------------------------------------------------------------------------- +# Metric-event helper +# --------------------------------------------------------------------------- + +_metrics_logger = logging.getLogger("syncbot.metrics") + + +def emit_metric( + metric_name: str, + value: float = 1, + unit: str = "Count", + **dimensions: Any, +) -> None: + """Emit a metric as a structured log entry. + + CloudWatch Logs Insights can aggregate these with queries like:: + + filter metric_name = "messages_synced" + | stats sum(metric_value) as total by bin(5m) + + Parameters + ---------- + metric_name: + Short snake_case identifier, e.g. ``messages_synced``. + value: + Numeric value (default ``1`` for counter-style metrics). + unit: + CloudWatch-compatible unit string (``Count``, ``Milliseconds``, …). + **dimensions: + Arbitrary key/value pairs attached to the metric event. 
+ """ + _metrics_logger.info( + metric_name, + extra={ + "metric_name": metric_name, + "metric_value": value, + "metric_unit": unit, + **dimensions, + }, + ) diff --git a/syncbot/requirements.txt b/syncbot/requirements.txt index d794c55..8d1ceb2 100644 --- a/syncbot/requirements.txt +++ b/syncbot/requirements.txt @@ -1,15 +1,19 @@ -certifi==2023.7.22 ; python_version >= "3.11" and python_version < "4.0" -cffi==1.16.0 ; python_version >= "3.11" and python_version < "4.0" -charset-normalizer==3.3.0 ; python_version >= "3.11" and python_version < "4.0" -cryptography==41.0.4 ; python_version >= "3.11" and python_version < "4.0" -greenlet==3.0.0 ; python_version >= "3.11" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") and python_version < "4.0" -idna==3.4 ; python_version >= "3.11" and python_version < "4.0" -pillow-heif==0.16.0 ; python_version >= "3.11" and python_version < "4.0" -pillow==10.3.0 ; python_version >= "3.11" and python_version < "4.0" -pycparser==2.21 ; python_version >= "3.11" and python_version < "4.0" -pymysql==1.1.0 ; python_version >= "3.11" and python_version < "4.0" -requests==2.31.0 ; python_version >= "3.11" and python_version < "4.0" -slack-bolt==1.18.0 ; python_version >= "3.11" and python_version < "4.0" -slack-sdk==3.23.0 ; python_version >= "3.11" and python_version < "4.0" -sqlalchemy==1.4.49 ; python_version >= "3.11" and python_version < "4.0" -urllib3==1.26.17 ; python_version >= "3.11" and python_version < "4.0" +alembic==1.18.4 ; python_version >= "3.12" and python_version < "4.0" +certifi==2026.2.25 ; python_version >= "3.12" and python_version < "4.0" +cffi==2.0.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" +charset-normalizer==3.4.6 ; python_version >= "3.12" and python_version < "4.0" 
+cryptography==46.0.6 ; python_version >= "3.12" and python_version < "4.0"
+greenlet==3.3.2 ; python_version >= "3.12" and python_version < "4.0" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32")
+idna==3.11 ; python_version >= "3.12" and python_version < "4.0"
+mako==1.3.10 ; python_version >= "3.12" and python_version < "4.0"
+markupsafe==3.0.3 ; python_version >= "3.12" and python_version < "4.0"
+psycopg2-binary==2.9.11 ; python_version >= "3.12" and python_version < "4.0"
+pycparser==3.0 ; python_version >= "3.12" and python_version < "4.0" and platform_python_implementation != "PyPy" and implementation_name != "pypy"
+pymysql==1.1.2 ; python_version >= "3.12" and python_version < "4.0"
+python-dotenv==1.2.2 ; python_version >= "3.12" and python_version < "4.0"
+requests==2.33.0 ; python_version >= "3.12" and python_version < "4.0"
+slack-bolt==1.27.0 ; python_version >= "3.12" and python_version < "4.0"
+slack-sdk==3.41.0 ; python_version >= "3.12" and python_version < "4.0"
+sqlalchemy==2.0.48 ; python_version >= "3.12" and python_version < "4.0"
+typing-extensions==4.15.0 ; python_version >= "3.12" and python_version < "4.0"
+urllib3==2.6.3 ; python_version >= "3.12" and python_version < "4.0"
diff --git a/syncbot/routing.py b/syncbot/routing.py
new file mode 100644
index 0000000..de4f5db
--- /dev/null
+++ b/syncbot/routing.py
@@ -0,0 +1,97 @@
+"""Request routing tables.
+
+Maps incoming Slack request types to handler functions. The
+:data:`MAIN_MAPPER` is a two-level dict keyed first by request category
+(``block_actions``, ``event_callback``, ``view_submission``) and then by
+the specific identifier (action ID, event type, or callback ID).
+
+:func:`~app.main_response` uses these tables to dispatch every request. 
+:data:`VIEW_ACK_MAPPER` lists view submission callback IDs handled by the fast ack +path in :mod:`app` (``view_ack``) before lazy work runs in :func:`~app.main_response`. +""" + +import builders +import handlers +from slack import actions + +ACTION_MAPPER = { + actions.CONFIG_JOIN_EXISTING_SYNC: builders.build_join_sync_form, + actions.CONFIG_CREATE_NEW_SYNC: builders.build_new_sync_form, + actions.CONFIG_REMOVE_SYNC: handlers.handle_remove_sync, + actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: handlers.check_join_sync_channel, + actions.CONFIG_MANAGE_USER_MATCHING: builders.build_user_matching_entry, + actions.CONFIG_USER_MAPPING_BACK: handlers.handle_user_mapping_back, + actions.CONFIG_USER_MAPPING_EDIT: builders.build_user_mapping_edit_modal, + actions.CONFIG_USER_MAPPING_REFRESH: handlers.handle_user_mapping_refresh, + actions.CONFIG_CREATE_GROUP: handlers.handle_create_group, + actions.CONFIG_JOIN_GROUP: handlers.handle_join_group, + actions.CONFIG_INVITE_WORKSPACE: handlers.handle_invite_workspace, + actions.CONFIG_LEAVE_GROUP: handlers.handle_leave_group, + actions.CONFIG_ACCEPT_GROUP_REQUEST: handlers.handle_accept_group_invite, + actions.CONFIG_DECLINE_GROUP_REQUEST: handlers.handle_decline_group_invite, + actions.CONFIG_CANCEL_GROUP_REQUEST: handlers.handle_decline_group_invite, + actions.CONFIG_PUBLISH_CHANNEL: handlers.handle_publish_channel, + actions.CONFIG_UNPUBLISH_CHANNEL: handlers.handle_unpublish_channel, + actions.CONFIG_PAUSE_SYNC: handlers.handle_pause_sync, + actions.CONFIG_RESUME_SYNC: handlers.handle_resume_sync, + actions.CONFIG_STOP_SYNC: handlers.handle_stop_sync, + actions.CONFIG_SUBSCRIBE_CHANNEL: handlers.handle_subscribe_channel, + actions.CONFIG_REFRESH_HOME: handlers.handle_refresh_home, + actions.CONFIG_BACKUP_RESTORE: handlers.handle_backup_restore, + actions.CONFIG_BACKUP_DOWNLOAD: handlers.handle_backup_download, + actions.CONFIG_BACKUP_RESTORE_PROCEED: handlers.handle_backup_restore_proceed, + actions.CONFIG_DATA_MIGRATION: 
handlers.handle_data_migration, + actions.CONFIG_DATA_MIGRATION_EXPORT: handlers.handle_data_migration_export, + actions.CONFIG_DATA_MIGRATION_PROCEED: handlers.handle_data_migration_proceed, + actions.CONFIG_DB_RESET: handlers.handle_db_reset, + actions.CONFIG_DB_RESET_PROCEED: handlers.handle_db_reset_proceed, + actions.CONFIG_GENERATE_FEDERATION_CODE: handlers.handle_generate_federation_code, + actions.CONFIG_ENTER_FEDERATION_CODE: handlers.handle_enter_federation_code, + actions.CONFIG_REMOVE_FEDERATION_CONNECTION: handlers.handle_remove_federation_connection, +} +"""Block-action ``action_id`` -> handler.""" + +EVENT_MAPPER = { + "app_home_opened": handlers.handle_app_home_opened, + "member_joined_channel": handlers.handle_member_joined_channel, + "message": handlers.respond_to_message_event, + "reaction_added": handlers._handle_reaction, + "reaction_removed": handlers._handle_reaction, + "team_join": handlers.handle_team_join, + "tokens_revoked": handlers.handle_tokens_revoked, + "user_profile_changed": handlers.handle_user_profile_changed, +} +"""Event ``type`` -> handler.""" + +VIEW_MAPPER = { + actions.CONFIG_JOIN_SYNC_SUBMIT: handlers.handle_join_sync_submission, + actions.CONFIG_NEW_SYNC_SUBMIT: handlers.handle_new_sync_submission, + actions.CONFIG_USER_MAPPING_EDIT_SUBMIT: handlers.handle_user_mapping_edit_submit, + actions.CONFIG_CREATE_GROUP_SUBMIT: handlers.handle_create_group_submit, + actions.CONFIG_JOIN_GROUP_SUBMIT: handlers.handle_join_group_submit, + actions.CONFIG_INVITE_WORKSPACE_SUBMIT: handlers.handle_invite_workspace_submit, + actions.CONFIG_LEAVE_GROUP_CONFIRM: handlers.handle_leave_group_confirm, + actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit_work, + actions.CONFIG_SUBSCRIBE_CHANNEL_SUBMIT: handlers.handle_subscribe_channel_submit, + actions.CONFIG_STOP_SYNC_CONFIRM: handlers.handle_stop_sync_confirm, + actions.CONFIG_FEDERATION_CODE_SUBMIT: handlers.handle_federation_code_submit, + 
actions.CONFIG_FEDERATION_LABEL_SUBMIT: handlers.handle_federation_label_submit,
+    actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit_work,
+    actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit_work,
+}
+"""View submission ``callback_id`` -> lazy work handler (after HTTP ack)."""
+
+VIEW_ACK_MAPPER = {
+    actions.CONFIG_PUBLISH_MODE_SUBMIT: handlers.handle_publish_mode_submit_ack,
+    actions.CONFIG_PUBLISH_CHANNEL_SUBMIT: handlers.handle_publish_channel_submit_ack,
+    actions.CONFIG_BACKUP_RESTORE_SUBMIT: handlers.handle_backup_restore_submit_ack,
+    actions.CONFIG_DATA_MIGRATION_SUBMIT: handlers.handle_data_migration_submit_ack,
+}
+"""Fast ack handler per deferred-ack view submission (``dict`` or ``None`` for Slack ``ack()``); keys must stay in sync with :data:`slack.deferred_ack_views.DEFERRED_ACK_VIEW_CALLBACK_IDS`."""
+
+MAIN_MAPPER = {
+    "block_actions": ACTION_MAPPER,
+    "event_callback": EVENT_MAPPER,
+    "view_submission": VIEW_MAPPER,
+}
+"""Top-level dispatcher: request category -> sub-mapper."""
diff --git a/syncbot/utils/__init__.py b/syncbot/slack/__init__.py
similarity index 100%
rename from syncbot/utils/__init__.py
rename to syncbot/slack/__init__.py
diff --git a/syncbot/slack/actions.py b/syncbot/slack/actions.py
new file mode 100644
index 0000000..308ce0e
--- /dev/null
+++ b/syncbot/slack/actions.py
@@ -0,0 +1,225 @@
+"""Slack Block Kit action ID constants.
+
+These string constants are used as ``action_id`` / ``callback_id`` values
+throughout the UI forms and handler routing tables. Keeping them in one
+place avoids typos and makes refactoring easier. 
+""" + +CONFIG_JOIN_EXISTING_SYNC = "join_existing_sync" +"""Action: user clicked "Join existing Sync" button.""" + +CONFIG_CREATE_NEW_SYNC = "create_new_sync" +"""Action: user clicked "Create new Sync" button.""" + +CONFIG_REMOVE_SYNC = "remove_sync" +"""Action: user clicked "DeSync" button (prefix-matched).""" + +CONFIG_NEW_SYNC_CHANNEL_SELECT = "config_new_sync_channel_select" +"""Input: channel picker in the new-sync form.""" + +CONFIG_NEW_SYNC_SUBMIT = "config_new_sync_submit" +"""Callback: new-sync modal submitted.""" + +CONFIG_JOIN_SYNC_SELECT = "config_join_sync_select" +"""Input: sync selector in the join-sync form.""" + +CONFIG_JOIN_SYNC_CHANNEL_SELECT = "config_join_sync_channel_select" +"""Input: channel selector in the join-sync form (dispatches an action on change).""" + +CONFIG_JOIN_SYNC_SUBMIT = "config_join_sync_submit" +"""Callback: join-sync modal submitted.""" + +# --------------------------------------------------------------------------- +# User Matching actions +# --------------------------------------------------------------------------- + +CONFIG_MANAGE_USER_MATCHING = "manage_user_matching" +"""Action: user clicked "User Mapping" button on the Home tab.""" + +CONFIG_USER_MAPPING_BACK = "user_mapping_back" +"""Action: user clicked "Back" on the user mapping screen to return to main Home tab.""" + +CONFIG_USER_MAPPING_EDIT = "user_mapping_edit" +"""Action: user clicked "Edit" on a user row in the mapping screen (prefix-matched with mapping ID).""" + +CONFIG_USER_MAPPING_EDIT_SUBMIT = "user_mapping_edit_submit" +"""Callback: per-user edit mapping modal submitted.""" + +CONFIG_USER_MAPPING_EDIT_SELECT = "user_mapping_edit_select" +"""Input: user picker dropdown in the edit mapping modal.""" + +CONFIG_USER_MAPPING_REFRESH = "user_mapping_refresh" +"""Action: user clicked "Refresh" on the user mapping screen.""" + +# --------------------------------------------------------------------------- +# Workspace Group actions +# 
--------------------------------------------------------------------------- + +CONFIG_CREATE_GROUP = "create_group" +"""Action: user clicked "Create Group" on the Home tab.""" + +CONFIG_CREATE_GROUP_SUBMIT = "create_group_submit" +"""Callback: create-group modal submitted.""" + +CONFIG_CREATE_GROUP_NAME = "create_group_name" +"""Input: text field for the group name.""" + +CONFIG_JOIN_GROUP = "join_group" +"""Action: user clicked "Join Group" on the Home tab.""" + +CONFIG_JOIN_GROUP_SUBMIT = "join_group_submit" +"""Callback: join-group modal submitted.""" + +CONFIG_JOIN_GROUP_CODE = "join_group_code" +"""Input: text field for the group invite code.""" + +CONFIG_LEAVE_GROUP = "leave_group" +"""Action: user clicked "Leave Group" (prefix-matched with group_id).""" + +CONFIG_LEAVE_GROUP_CONFIRM = "leave_group_confirm" +"""Callback: leave-group confirmation modal submitted.""" + +CONFIG_ACCEPT_GROUP_REQUEST = "accept_group_request" +"""Action: user clicked "Accept" on an incoming group join request (prefix-matched with member_id).""" + +CONFIG_CANCEL_GROUP_REQUEST = "cancel_group_request" +"""Action: user clicked "Cancel Request" on an outgoing group join request (prefix-matched with member_id).""" + +CONFIG_INVITE_WORKSPACE = "invite_workspace" +"""Action: user clicked "Invite Workspace" button on a group (value carries group_id).""" + +CONFIG_INVITE_WORKSPACE_SUBMIT = "invite_workspace_submit" +"""Callback: invite-workspace modal submitted (sends DM invite to selected workspace).""" + +CONFIG_INVITE_WORKSPACE_SELECT = "invite_workspace_select" +"""Input: workspace picker dropdown in the invite workspace modal.""" + +CONFIG_DECLINE_GROUP_REQUEST = "decline_group_request" +"""Action: user clicked "Decline" on an incoming group invite DM (prefix-matched with member_id).""" + +# --------------------------------------------------------------------------- +# Channel Sync actions +# --------------------------------------------------------------------------- + 
+CONFIG_PUBLISH_CHANNEL = "publish_channel" +"""Action: user clicked "Sync Channel" button (value carries group_id).""" + +CONFIG_PUBLISH_CHANNEL_SELECT = "publish_channel_select" +"""Input: channel picker in the publish channel modal.""" + +CONFIG_PUBLISH_CHANNEL_SUBMIT = "publish_channel_submit" +"""Callback: publish channel modal submitted.""" + +CONFIG_PUBLISH_MODE_SUBMIT = "publish_mode_submit" +"""Callback: step 1 of publish channel (sync mode selection) submitted.""" + +CONFIG_PUBLISH_SYNC_MODE = "publish_sync_mode" +"""Input: radio buttons for direct vs group-wide sync mode.""" + +CONFIG_PUBLISH_DIRECT_TARGET = "publish_direct_target" +"""Input: workspace picker for direct (1-to-1) sync target.""" + +CONFIG_UNPUBLISH_CHANNEL = "unpublish_channel" +"""Action: user clicked "Unpublish" on a published channel (prefix-matched with sync_channel_id).""" + +CONFIG_PAUSE_SYNC = "pause_sync" +"""Action: user clicked "Pause Syncing" on an active channel sync (prefix-matched with sync_id).""" + +CONFIG_RESUME_SYNC = "resume_sync" +"""Action: user clicked "Resume Syncing" on a paused channel sync (prefix-matched with sync_id).""" + +CONFIG_STOP_SYNC = "stop_sync" +"""Action: user clicked "Stop Syncing" on a channel sync (prefix-matched with sync_id).""" + +CONFIG_STOP_SYNC_CONFIRM = "stop_sync_confirm" +"""View submission: user confirmed stopping a channel sync.""" + +CONFIG_SUBSCRIBE_CHANNEL = "subscribe_channel" +"""Action: user clicked "Start Syncing" on an available channel (prefix-matched with sync_id).""" + +CONFIG_SUBSCRIBE_CHANNEL_SELECT = "subscribe_channel_select" +"""Input: channel picker in the subscribe channel modal.""" + +CONFIG_SUBSCRIBE_CHANNEL_SUBMIT = "subscribe_channel_submit" +"""Callback: subscribe channel modal submitted.""" + +# --------------------------------------------------------------------------- +# Home Tab actions +# --------------------------------------------------------------------------- + +CONFIG_REFRESH_HOME = "refresh_home" 
+"""Action: user clicked the "Refresh" button on the Home tab.""" + +CONFIG_BACKUP_RESTORE = "backup_restore" +"""Action: user clicked "Backup/Restore" on the Home tab (opens modal).""" + +CONFIG_BACKUP_RESTORE_SUBMIT = "backup_restore_submit" +"""Callback: Backup/Restore modal submitted (restore from backup).""" + +CONFIG_BACKUP_RESTORE_CONFIRM = "backup_restore_confirm" +"""Callback: Confirm restore when HMAC or encryption key mismatch.""" + +CONFIG_BACKUP_RESTORE_PROCEED = "backup_restore_proceed" +"""Action: danger button to proceed with restore despite warnings.""" + +CONFIG_BACKUP_DOWNLOAD = "backup_download" +"""Action: user clicked Download backup in Backup/Restore modal.""" + +CONFIG_BACKUP_RESTORE_JSON_INPUT = "backup_restore_json_input" +"""Input: uploaded JSON file in Backup/Restore modal.""" + +CONFIG_DATA_MIGRATION = "data_migration" +"""Action: user clicked "Data Migration" in External Connections (opens modal).""" + +CONFIG_DATA_MIGRATION_SUBMIT = "data_migration_submit" +"""Callback: Data Migration modal submitted (import migration file).""" + +CONFIG_DATA_MIGRATION_CONFIRM = "data_migration_confirm" +"""Callback: Confirm import when signature check failed.""" + +CONFIG_DATA_MIGRATION_PROCEED = "data_migration_proceed" +"""Action: danger button to proceed with import despite warnings.""" + +CONFIG_DATA_MIGRATION_EXPORT = "data_migration_export" +"""Action: user clicked Export in Data Migration modal.""" + +CONFIG_DATA_MIGRATION_JSON_INPUT = "data_migration_json_input" +"""Input: uploaded JSON file in Data Migration modal.""" + +# --------------------------------------------------------------------------- +# External Connections (federation) actions +# --------------------------------------------------------------------------- + +CONFIG_GENERATE_FEDERATION_CODE = "generate_federation_code" +"""Action: user clicked "Generate Connection Code" on the Home tab.""" + +CONFIG_ENTER_FEDERATION_CODE = "enter_federation_code" +"""Action: user clicked "Enter 
Connection Code" on the Home tab.""" + +CONFIG_FEDERATION_CODE_SUBMIT = "federation_code_submit" +"""Callback: enter-connection-code modal submitted.""" + +CONFIG_FEDERATION_CODE_INPUT = "federation_code_input" +"""Input: text field for the connection code in the modal.""" + +CONFIG_FEDERATION_LABEL_SUBMIT = "federation_label_submit" +"""Callback: connection label modal submitted (before code generation).""" + +CONFIG_FEDERATION_LABEL_INPUT = "federation_label_input" +"""Input: text field for the connection label in the modal.""" + +CONFIG_REMOVE_FEDERATION_CONNECTION = "remove_federation_connection" +"""Action: user clicked "Remove Connection" on an external connection (prefix-matched).""" + +# --------------------------------------------------------------------------- +# Database Reset (dev/admin tool, gated by PRIMARY_WORKSPACE + ENABLE_DB_RESET) +# --------------------------------------------------------------------------- + +CONFIG_DB_RESET = "db_reset" +"""Action: user clicked "Reset Database" on the Home tab.""" + +CONFIG_DB_RESET_CONFIRM = "db_reset_confirm" +"""Callback: database reset confirmation view submitted.""" + +CONFIG_DB_RESET_PROCEED = "db_reset_proceed" +"""Action: danger button to proceed with database reset.""" diff --git a/syncbot/slack/blocks.py b/syncbot/slack/blocks.py new file mode 100644 index 0000000..9778f33 --- /dev/null +++ b/syncbot/slack/blocks.py @@ -0,0 +1,95 @@ +"""Block Kit shorthand constructors. + +Thin wrappers around :mod:`slack.orm` dataclasses that collapse the most +common 5-10 line patterns into single function calls. Every function +returns an ``orm`` object, so they compose naturally with +:class:`~slack.orm.BlockView` and the existing dataclass API. 
+ +Usage:: + + from slack.blocks import header, divider, context, text, button, actions + + blocks = [ + header("SyncBot Configuration"), + actions(button(":arrows_counterclockwise: Refresh", action=CONFIG_REFRESH_HOME)), + divider(), + context("Only workspace admins can configure SyncBot."), + ] +""" + +from slack import orm + + +def header(label: str) -> orm.HeaderBlock: + """Large bold header text.""" + return orm.HeaderBlock(text=label) + + +def divider() -> orm.DividerBlock: + """Horizontal divider line.""" + return orm.DividerBlock() + + +def context(label: str) -> orm.ContextBlock: + """Mrkdwn context block (small grey text).""" + return orm.ContextBlock(element=orm.ContextElement(initial_value=label)) + + +def text(label: str) -> orm.SectionBlock: + """Mrkdwn section block (body text).""" + return orm.SectionBlock(label=label) + + +# Alias for section-style usage (SectionBlock with label only). +section = text + + +def button( + label: str, + action: str, + *, + value: str | None = None, + style: str | None = None, + confirm: object = None, + url: str | None = None, +) -> orm.ButtonElement: + """Button element for use inside :func:`actions`.""" + return orm.ButtonElement( + label=label, + action=action, + value=value or label, + style=style, + confirm=confirm, + url=url, + ) + + +def actions(*elements: orm.ButtonElement) -> orm.ActionsBlock: + """Actions block containing one or more buttons.""" + return orm.ActionsBlock(elements=list(elements)) + + +def section_with_image( + label: str, + image_url: str | None, + alt_text: str = "icon", +) -> orm.SectionBlock: + """Section block with an optional image accessory. + + If *image_url* is falsy, returns a plain section block. 
+ """ + if image_url: + return orm.SectionBlock( + label=label, + element=orm.ImageAccessoryElement(image_url=image_url, alt_text=alt_text), + ) + return orm.SectionBlock(label=label) + + +def workspace_card( + label: str, + ws_info: dict, + ws_name: str, +) -> orm.SectionBlock: + """Section block showing workspace info with an optional team icon.""" + return section_with_image(label, ws_info.get("icon_url"), ws_name) diff --git a/syncbot/slack/deferred_ack_views.py b/syncbot/slack/deferred_ack_views.py new file mode 100644 index 0000000..3ffb144 --- /dev/null +++ b/syncbot/slack/deferred_ack_views.py @@ -0,0 +1,18 @@ +"""View submission callback IDs whose handlers control the Slack interaction HTTP ack. + +Kept separate from :mod:`app` so tests can import it without initializing the database. +""" + +from slack.actions import ( + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_PUBLISH_MODE_SUBMIT, +) + +DEFERRED_ACK_VIEW_CALLBACK_IDS = frozenset({ + CONFIG_PUBLISH_MODE_SUBMIT, + CONFIG_PUBLISH_CHANNEL_SUBMIT, + CONFIG_BACKUP_RESTORE_SUBMIT, + CONFIG_DATA_MIGRATION_SUBMIT, +}) diff --git a/syncbot/slack/forms.py b/syncbot/slack/forms.py new file mode 100644 index 0000000..67f20c0 --- /dev/null +++ b/syncbot/slack/forms.py @@ -0,0 +1,100 @@ +"""Pre-built Slack Block Kit forms for SyncBot configuration modals. + +Defines reusable form templates that are deep-copied and customised at +runtime before being sent to Slack: + +* :data:`NEW_SYNC_FORM` — Modal for creating a new sync group (channel picker). +* :data:`JOIN_SYNC_FORM` — Modal for joining an existing sync group + (sync selector + channel selector). +* :data:`ENTER_GROUP_CODE_FORM` — Modal for entering a group invite code. +* :data:`PUBLISH_CHANNEL_FORM` — Modal for publishing a channel. +* :data:`SUBSCRIBE_CHANNEL_FORM` — Modal for subscribing to a channel. 
+""" + +from slack import actions, orm + +NEW_SYNC_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel to Sync", + action=actions.CONFIG_NEW_SYNC_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a Channel"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select the Channel you want to sync. The Sync will be named after the Channel. " + "If a Sync has already been set up in another Workspace, use 'Join existing Sync' instead.", + ), + ), + ] +) + +JOIN_SYNC_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Sync Select", + action=actions.CONFIG_JOIN_SYNC_SELECT, + element=orm.StaticSelectElement(placeholder="Select a Sync to join"), + optional=False, + ), + orm.InputBlock( + label="Sync Channel Select", + action=actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a Channel to use for this Sync"), + optional=False, + dispatch_action=True, + ), + ] +) + + +ENTER_GROUP_CODE_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Group Invite Code", + action=actions.CONFIG_JOIN_GROUP_CODE, + element=orm.PlainTextInputElement(placeholder="Enter the code (e.g. 
A7X-K9M)"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Enter the invite code shared by an Admin from another Workspace in the Group.", + ), + ), + ] +) + + +PUBLISH_CHANNEL_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel to Publish", + action=actions.CONFIG_PUBLISH_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a Channel to publish"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select a Channel from your Workspace to make available for Syncing.", + ), + ), + ] +) + + +SUBSCRIBE_CHANNEL_FORM = orm.BlockView( + blocks=[ + orm.InputBlock( + label="Channel for Sync", + action=actions.CONFIG_SUBSCRIBE_CHANNEL_SELECT, + element=orm.ConversationsSelectElement(placeholder="Select a Channel to sync into"), + optional=False, + ), + orm.ContextBlock( + element=orm.ContextElement( + initial_value="Select a Channel in your Workspace to receive messages from the published Channel.", + ), + ), + ] +) diff --git a/syncbot/utils/slack/orm.py b/syncbot/slack/orm.py similarity index 75% rename from syncbot/utils/slack/orm.py rename to syncbot/slack/orm.py index 0da7a6a..a8c2ed0 100644 --- a/syncbot/utils/slack/orm.py +++ b/syncbot/slack/orm.py @@ -1,9 +1,11 @@ import json +import logging from dataclasses import dataclass, field -from typing import Any, Dict, List +from typing import Any +from helpers import safe_get -from utils.helpers import safe_get +logger = logging.getLogger(__name__) @dataclass @@ -14,8 +16,8 @@ class BaseElement: def make_placeholder_field(self): return {"placeholder": {"type": "plain_text", "text": self.placeholder, "emoji": True}} - def get_selected_value(): - return "Not yet implemented" + def get_selected_value(self, input_data, action): + raise NotImplementedError @dataclass @@ -31,7 +33,7 @@ def as_form_field(self, initial_value=None): raise Exception("Not Implemented") def get_selected_value(self, input_data, 
action): - return "Not yet implemented" + raise NotImplementedError @dataclass @@ -76,7 +78,9 @@ def get_selected_value(self, input_data, **kwargs): return self.element.get_selected_value(input_data, self.action, **kwargs) def as_form_field(self): - block = {"type": "section", "block_id": self.action, "text": self.make_label_field()} + block = {"type": "section", "text": self.make_label_field()} + if self.action: + block["block_id"] = self.action if self.element: block.update({"accessory": self.element.as_form_field(action=self.action)}) return block @@ -85,6 +89,23 @@ def make_label_field(self, text=None): return {"type": "mrkdwn", "text": text or self.label or ""} +@dataclass +class HeaderBlock(BaseBlock): + """A ``header`` block — renders as large bold text.""" + + text: str = None + + def as_form_field(self): + return { + "type": "header", + "text": { + "type": "plain_text", + "text": self.text or self.label or "", + "emoji": True, + }, + } + + @dataclass class ButtonElement(BaseAction): style: str = None @@ -114,8 +135,8 @@ class SelectorOption: value: str -def as_selector_options(names: List[str], values: List[str] = []) -> List[SelectorOption]: - if values == []: +def as_selector_options(names: list[str], values: list[str] | None = None) -> list[SelectorOption]: + if values is None: selectors = [SelectorOption(name=x, value=x) for x in names] else: selectors = [SelectorOption(name=x, value=y) for x, y in zip(names, values)] @@ -125,10 +146,7 @@ def as_selector_options(names: List[str], values: List[str] = []) -> List[Select @dataclass class StaticSelectElement(BaseElement): initial_value: str = None - options: List[SelectorOption] = None - - # def with_options(self, options: List[SelectorOption]): - # return SelectorElement(self.label, self.action, options) + options: list[SelectorOption] = None def as_form_field(self, action: str): if not self.options: @@ -159,7 +177,7 @@ def __make_option(self, option: SelectorOption): @dataclass class 
RadioButtonsElement(BaseElement): initial_value: str = None - options: List[SelectorOption] = None + options: list[SelectorOption] = None def get_selected_value(self, input_data, action): return safe_get(input_data, action, action, "selected_option", "value") @@ -257,6 +275,32 @@ def as_form_field(self, action: str): return j +@dataclass +class ConversationsSelectElement(BaseElement): + """Channel picker that includes both public and private channels.""" + + initial_value: str = None + + def get_selected_value(self, input_data, action): + return safe_get(input_data, action, action, "selected_conversation") + + def as_form_field(self, action: str): + j = { + "type": "conversations_select", + "action_id": action, + "filter": { + "include": ["public", "private"], + "exclude_bot_users": True, + "exclude_external_shared_channels": True, + }, + } + if self.placeholder: + j.update(self.make_placeholder_field()) + if self.initial_value: + j["initial_conversation"] = self.initial_value + return j + + @dataclass class DatepickerElement(BaseElement): initial_value: str = None @@ -316,7 +360,7 @@ def as_form_field(self, action: str): @dataclass class MultiUsersSelectElement(BaseElement): - initial_value: List[str] = None + initial_value: list[str] = None def get_selected_value(self, input_data, action): return safe_get(input_data, action, action, "selected_users") @@ -336,6 +380,7 @@ def as_form_field(self, action: str): @dataclass class ContextBlock(BaseBlock): element: BaseElement = None + elements: list = None initial_value: str = "" def get_selected_value(self, input_data, action): @@ -346,12 +391,45 @@ def get_selected_value(self, input_data, action): def as_form_field(self): j = {"type": "context"} - j.update({"elements": [self.element.as_form_field()]}) + if self.elements: + j["elements"] = [e.as_form_field() for e in self.elements] + elif self.element: + j["elements"] = [self.element.as_form_field()] if self.action: j["block_id"] = self.action return j +@dataclass 
+class ImageContextElement(BaseElement): + """An image element for use inside a ContextBlock.""" + + image_url: str = None + alt_text: str = "icon" + + def as_form_field(self): + return { + "type": "image", + "image_url": self.image_url, + "alt_text": self.alt_text, + } + + +@dataclass +class ImageAccessoryElement(BaseElement): + """An image element for use as a SectionBlock accessory.""" + + image_url: str = None + alt_text: str = "icon" + + def as_form_field(self, action: str = None): + return { + "type": "image", + "image_url": self.image_url, + "alt_text": self.alt_text, + } + + @dataclass class ContextElement(BaseElement): initial_value: str = None @@ -372,7 +450,7 @@ def as_form_field(self): @dataclass class ActionsBlock(BaseBlock): - elements: List[BaseAction] = field(default_factory=list) + elements: list[BaseAction] = field(default_factory=list) def as_form_field(self): j = { @@ -386,7 +464,7 @@ def as_form_field(self): @dataclass class BlockView: - blocks: List[BaseBlock] + blocks: list[BaseBlock] def delete_block(self, action: str): self.blocks = [b for b in self.blocks if b.action != action] @@ -399,12 +477,12 @@ def set_initial_values(self, values: dict): if block.action in values: block.element.initial_value = values[block.action] - def set_options(self, options: Dict[str, List[SelectorOption]]): + def set_options(self, options: dict[str, list[SelectorOption]]): for block in self.blocks: if block.action in options: block.element.options = options[block.action] - def as_form_field(self) -> List[dict]: + def as_form_field(self) -> list[dict]: return [b.as_form_field() for b in self.blocks] def get_selected_values(self, body) -> dict: @@ -426,7 +504,7 @@ def post_modal( trigger_id: str, title_text: str, callback_id: str, - submit_button_text: str = "Submit", + submit_button_text: str | None = "Submit", parent_metadata: dict = None, close_button_text: str = "Close", notify_on_close: bool = False, @@ -445,7 +523,7 @@ def post_modal( if parent_metadata: 
view["private_metadata"] = json.dumps(parent_metadata) - if submit_button_text != "None": # TODO: would prefer this to use None instead of "None" + if submit_button_text: view["submit"] = {"type": "plain_text", "text": submit_button_text} try: @@ -454,8 +532,19 @@ def post_modal( elif new_or_add == "add": client.views_push(trigger_id=trigger_id, view=view) except Exception as e: - print(e) - print(json.dumps(view, indent=2)) + logger.error( + "modal_open_or_push_failed", + extra={"callback_id": callback_id, "mode": new_or_add, "error": str(e)}, + ) + logger.debug("modal_view_payload", extra={"view": json.dumps(view, indent=2)}) + + def publish_home_tab(self, client: Any, user_id: str): + """Publish a Home tab view for the given user.""" + blocks = self.as_form_field() + client.views_publish( + user_id=user_id, + view={"type": "home", "blocks": blocks}, + ) def update_modal( self, @@ -463,7 +552,7 @@ def update_modal( view_id: str, title_text: str, callback_id: str, - submit_button_text: str = "Submit", + submit_button_text: str | None = "Submit", parent_metadata: dict = None, close_button_text: str = "Close", notify_on_close: bool = False, @@ -474,16 +563,40 @@ def update_modal( "type": "modal", "callback_id": callback_id, "title": {"type": "plain_text", "text": title_text}, - "submit": {"type": "plain_text", "text": submit_button_text}, "close": {"type": "plain_text", "text": close_button_text}, "notify_on_close": notify_on_close, "blocks": blocks, } + if submit_button_text: + view["submit"] = {"type": "plain_text", "text": submit_button_text} if parent_metadata: view["private_metadata"] = json.dumps(parent_metadata) client.views_update(view_id=view_id, view=view) + def as_ack_update( + self, + title_text: str, + callback_id: str, + submit_button_text: str = "Submit", + parent_metadata: dict = None, + close_button_text: str = "Close", + ) -> dict: + """Build a modal view dict suitable for ack(response_action="update").""" + blocks = self.as_form_field() + view: 
dict = {
+            "type": "modal",
+            "callback_id": callback_id,
+            "title": {"type": "plain_text", "text": title_text},
+            "close": {"type": "plain_text", "text": close_button_text},
+            "blocks": blocks,
+        }
+        if submit_button_text and submit_button_text != "None":
+            view["submit"] = {"type": "plain_text", "text": submit_button_text}
+        if parent_metadata:
+            view["private_metadata"] = json.dumps(parent_metadata)
+        return view
+
 
 @dataclass
 class ImageBlock(BaseBlock):
diff --git a/syncbot/slack_manifest_scopes.py b/syncbot/slack_manifest_scopes.py
new file mode 100644
index 0000000..35a8bcf
--- /dev/null
+++ b/syncbot/slack_manifest_scopes.py
@@ -0,0 +1,63 @@
+"""Canonical Slack OAuth scopes — keep in sync with repo root ``slack-manifest.json``.
+
+``oauth_config.scopes.bot`` must match :envvar:`SLACK_BOT_SCOPES` (comma-separated).
+``oauth_config.scopes.user`` must match :envvar:`SLACK_USER_SCOPES` (comma-separated).
+This app always uses both **bot** and **user** scopes; ``USER_SCOPES`` is non-empty and must
+match the manifest ``user`` array (order included). When changing scopes, edit this module and
+``slack-manifest.json`` / ``slack-manifest_test.json`` together, then AWS SAM defaults,
+GCP ``slack_user_scopes``, and env examples.
+"""
+
+from __future__ import annotations
+
+# --- Must match slack-manifest.json oauth_config.scopes.bot (order as in manifest) ---
+
+BOT_SCOPES: tuple[str, ...] = (
+    "app_mentions:read",
+    "channels:history",
+    "channels:join",
+    "channels:read",
+    "channels:manage",
+    "chat:write",
+    "chat:write.customize",
+    "files:read",
+    "files:write",
+    "groups:history",
+    "groups:read",
+    "groups:write",
+    "im:write",
+    "reactions:read",
+    "reactions:write",
+    "team:read",
+    "users:read",
+    "users:read.email",
+)
+
+# --- Must match slack-manifest.json oauth_config.scopes.user (order as in manifest) ---
+
+USER_SCOPES: tuple[str, ...] 
= ( + "chat:write", + "channels:history", + "channels:read", + "files:read", + "files:write", + "groups:history", + "groups:read", + "groups:write", + "im:write", + "reactions:read", + "reactions:write", + "team:read", + "users:read", + "users:read.email", +) + + +def bot_scopes_comma_separated() -> str: + """Return the bot scope string for SLACK_BOT_SCOPES / CloudFormation.""" + return ",".join(BOT_SCOPES) + + +def user_scopes_comma_separated() -> str: + """Return the user scope string for SLACK_USER_SCOPES / CloudFormation / Terraform.""" + return ",".join(USER_SCOPES) diff --git a/syncbot/utils/announcements.py b/syncbot/utils/announcements.py deleted file mode 100644 index b38c40a..0000000 --- a/syncbot/utils/announcements.py +++ /dev/null @@ -1,44 +0,0 @@ -import time -from logging import Logger -from typing import List - -from slack_sdk.web import WebClient -from utils.db import DbManager -from utils.db.schemas import Region, SyncChannel - -# msg = ":rotating_light: Hey, {region}! This is Moneyball, coming at you with some new features for Syncbot! :rotating_light:\n\n" -# msg += ":camera_with_flash: *Photo Sync*: photos will now be synced when you post them to linked channels. Videos are not supported at this time. Also, animated GIFs will be synced, but they will show up as still images.\n\n" -# msg += ":speech_balloon: *@ mention tagging*: you can now @ mention users in your synced posts, and Syncbot will do its best to translate them to the appropriate user in the target workspace. 
Linked users must be in both workspaces for this to work, otherwise it will default to a non-tagged representation of a mention.\n\n" -# msg += "~ :moneybag: :baseball:" - - -def send( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -): - if body.get("text")[:7] == "confirm": - msg = body.get("text")[8:] - region_records: List[Region] = DbManager.find_records(Region, filters=[True]) - for region in region_records: - sync_channels: List[SyncChannel] = DbManager.find_records( - SyncChannel, filters=[SyncChannel.region_id == region.id] - ) - client = WebClient(token=region.bot_token) - for channel in sync_channels: - try: - client.chat_postMessage(channel=channel.channel_id, text=msg.format(region=region.workspace_name)) - print("Message sent!") - except Exception as e: - if e.response.get("error") == "ratelimited": - print("Rate limited, waiting 10 seconds") - time.sleep(10) - try: - client.chat_postMessage( - channel=channel.channel_id, text=msg.format(region=region.workspace_name) - ) - print("Message sent!") - except Exception as e: - print(f"Error sending message to {region.workspace_name}: {e}") - print(f"Error sending message to {region.workspace_name}: {e}") diff --git a/syncbot/utils/builders.py b/syncbot/utils/builders.py deleted file mode 100644 index c4b82fa..0000000 --- a/syncbot/utils/builders.py +++ /dev/null @@ -1,137 +0,0 @@ -import copy -from slack_sdk.web import WebClient -from logging import Logger -from utils import helpers -from utils.slack import forms, orm, actions -from utils.db.schemas import Region, SyncChannel, Sync -from utils.db import DbManager -from utils.helpers import safe_get - - -def build_config_form( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> orm.BlockView: - """Builds a BlockView config form for the given region. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. 
- context (dict): Context object. - - """ - - team_id: str = safe_get(body, "team_id") or safe_get(body, "view", "team_id") - trigger_id: str = safe_get(body, "trigger_id") - root_view_id: str = safe_get(body, "view", "root_view_id") - error_message: str = safe_get(body, "error_message") - region_record: Region = helpers.get_region_record(team_id, body, context, client) - - config_form = copy.deepcopy(forms.CONFIG_FORM) - - # pull all Syncs, SyncChannels for this region - records = DbManager.find_join_records2( - left_cls=SyncChannel, - right_cls=Sync, - filters=[SyncChannel.region_id == region_record.id], - ) - - for record in records: - sync_channel: SyncChannel = record[0] - sync: Sync = record[1] - config_form.blocks.extend( - forms.build_config_form_sync_block( - sync_channel=sync_channel, - sync=sync, - ) - ) - - if error_message: - config_form.blocks.insert( - 0, - orm.SectionBlock( - text=orm.MrkdwnText(error_message), - ), - ) - - if root_view_id: - config_form.update_modal( - client=client, - view_id=root_view_id, - callback_id=actions.CONFIG_FORM_SUBMIT, - title_text="SyncBot Configuration", - ) - else: - config_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_FORM_SUBMIT, - title_text="SyncBot Configuration", - ) - - -def build_join_sync_form( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Pushes a new modal layer to join a new sync. - - Args: - body (dict): Event body from the action invocation. - client (WebClient): The Slack WebClient object. - logger (Logger): A logger object. - context (dict): A context object. 
- """ - trigger_id: str = safe_get(body, "trigger_id") - team_id = safe_get(body, "view", "team_id") - join_sync_form: orm.BlockView = copy.deepcopy(forms.JOIN_SYNC_FORM) - - sync_records: list[Sync] = DbManager.find_records(Sync, [True]) - channel_sync_region_records: list[tuple[SyncChannel, Region]] = DbManager.find_join_records2( - left_cls=SyncChannel, - right_cls=Region, - filters=[Region.team_id == team_id], - ) - sync_records = [ - sync for sync in sync_records if sync.id not in [record[0].sync_id for record in channel_sync_region_records] - ] - - options = orm.as_selector_options([sync.title for sync in sync_records], [str(sync.id) for sync in sync_records]) - join_sync_form.set_options({actions.CONFIG_JOIN_SYNC_SELECT: options}) - join_sync_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - title_text="Join Sync", - new_or_add="add", - ) - - -def build_new_sync_form( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Pushes a new modal layer to create a new sync. - - Args: - body (dict): Event body from the action invocation. - client (WebClient): The Slack WebClient object. - logger (Logger): A logger object. - context (dict): A context object. 
- """ - trigger_id: str = safe_get(body, "trigger_id") - new_sync_form: orm.BlockView = copy.deepcopy(forms.NEW_SYNC_FORM) - new_sync_form.post_modal( - client=client, - trigger_id=trigger_id, - callback_id=actions.CONFIG_NEW_SYNC_SUBMIT, - title_text="New Sync", - new_or_add="add", - ) diff --git a/syncbot/utils/constants.py b/syncbot/utils/constants.py deleted file mode 100644 index a24f96b..0000000 --- a/syncbot/utils/constants.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - -SLACK_BOT_TOKEN = "SLACK_BOT_TOKEN" -SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME" -SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME" -SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" -SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" -SLACK_SCOPES = "ENV_SLACK_SCOPES" -PASSWORD_ENCRYPT_KEY = "PASSWORD_ENCRYPT_KEY" - -DATABASE_HOST = "DATABASE_HOST" -ADMIN_DATABASE_USER = "ADMIN_DATABASE_USER" -ADMIN_DATABASE_PASSWORD = "ADMIN_DATABASE_PASSWORD" -ADMIN_DATABASE_SCHEMA = "ADMIN_DATABASE_SCHEMA" - -LOCAL_DEVELOPMENT = os.environ.get(SLACK_BOT_TOKEN, "123") != "123" - -SLACK_STATE_S3_BUCKET_NAME = "ENV_SLACK_STATE_S3_BUCKET_NAME" -SLACK_INSTALLATION_S3_BUCKET_NAME = "ENV_SLACK_INSTALLATION_S3_BUCKET_NAME" -SLACK_CLIENT_ID = "ENV_SLACK_CLIENT_ID" -SLACK_CLIENT_SECRET = "ENV_SLACK_CLIENT_SECRET" -SLACK_SCOPES = "ENV_SLACK_SCOPES" -SLACK_SIGNING_SECRET = "SLACK_SIGNING_SECRET" - -WARNING_BLOCK = "WARNING_BLOCK" - -MAX_HEIF_SIZE = 1000 - -AWS_ACCESS_KEY_ID = "AWS_ACCESS_KEY_ID" -AWS_SECRET_ACCESS_KEY = "AWS_SECRET_ACCESS_KEY" -S3_IMAGE_BUCKET = "syncbot-images" -S3_IMAGE_URL = f"https://{S3_IMAGE_BUCKET}.s3.amazonaws.com/" diff --git a/syncbot/utils/db/__init__.py b/syncbot/utils/db/__init__.py deleted file mode 100644 index 02a1098..0000000 --- a/syncbot/utils/db/__init__.py +++ /dev/null @@ -1,163 +0,0 @@ -from dataclasses import dataclass -from typing import Tuple, TypeVar, List -import os -from sqlalchemy import create_engine, pool, and_ -from sqlalchemy.orm 
import sessionmaker -from utils.db.schemas import BaseClass -from utils import constants - - -@dataclass -class DatabaseField: - name: str - value: object = None - - -GLOBAL_ENGINE = None -GLOBAL_SESSION = None -GLOBAL_SCHEMA = None - - -def get_session(echo=False, schema=None): - if GLOBAL_SESSION: - return GLOBAL_SESSION - - global GLOBAL_ENGINE, GLOBAL_SCHEMA - if schema != GLOBAL_SCHEMA or not GLOBAL_ENGINE: - host = os.environ[constants.DATABASE_HOST] - user = os.environ[constants.ADMIN_DATABASE_USER] - passwd = os.environ[constants.ADMIN_DATABASE_PASSWORD] - database = schema or os.environ[constants.ADMIN_DATABASE_SCHEMA] - - db_url = f"mysql+pymysql://{user}:{passwd}@{host}:3306/{database}?charset=utf8mb4" - GLOBAL_ENGINE = create_engine(db_url, echo=echo, poolclass=pool.NullPool, convert_unicode=True) - GLOBAL_SCHEMA = database - return sessionmaker()(bind=GLOBAL_ENGINE) - - -def close_session(session): - global GLOBAL_SESSION, GLOBAL_ENGINE - if GLOBAL_SESSION == session: - if GLOBAL_ENGINE: - GLOBAL_ENGINE.close() - GLOBAL_SESSION = None - - -T = TypeVar("T") - - -class DbManager: - def get_record(cls: T, id, schema=None) -> T: - session = get_session(schema=schema) - try: - x = session.query(cls).filter(cls.get_id() == id).first() - if x: - session.expunge(x) - return x - finally: - session.rollback() - close_session(session) - - def find_records(cls: T, filters, schema=None) -> List[T]: - session = get_session(schema=schema) - try: - records = session.query(cls).filter(and_(*filters)).all() - for r in records: - session.expunge(r) - return records - finally: - session.rollback() - close_session(session) - - def find_join_records2(left_cls: T, right_cls: T, filters, schema=None) -> List[Tuple[T]]: - session = get_session(schema=schema) - try: - records = session.query(left_cls, right_cls).join(right_cls).filter(and_(*filters)).all() - session.expunge_all() - return records - finally: - session.rollback() - close_session(session) - - def 
find_join_records3( - left_cls: T, right_cls1: T, right_cls2: T, filters, schema=None, left_join=False - ) -> List[Tuple[T]]: - session = get_session(schema=schema) - try: - records = ( - session.query(left_cls, right_cls1, right_cls2) - .select_from(left_cls) - .join(right_cls1, isouter=left_join) - .join(right_cls2, isouter=left_join) - .filter(and_(*filters)) - .all() - ) - session.expunge_all() - return records - finally: - session.rollback() - close_session(session) - - def update_record(cls: T, id, fields, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(cls.get_id() == id).update(fields, synchronize_session="fetch") - session.flush() - finally: - session.commit() - close_session(session) - - def update_records(cls: T, filters, fields, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(and_(*filters)).update(fields, synchronize_session="fetch") - session.flush() - finally: - session.commit() - close_session(session) - - def create_record(record: BaseClass, schema=None) -> BaseClass: - session = get_session(schema=schema) - try: - session.add(record) - session.flush() - session.expunge(record) - finally: - session.commit() - close_session(session) - return record - - def create_records(records: List[BaseClass], schema=None): - session = get_session(schema=schema) - try: - session.add_all(records) - session.flush() - finally: - session.commit() - close_session(session) - - def delete_record(cls: T, id, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(cls.get_id() == id).delete() - session.flush() - finally: - session.commit() - close_session(session) - - def delete_records(cls: T, filters, schema=None): - session = get_session(schema=schema) - try: - session.query(cls).filter(and_(*filters)).delete() - session.flush() - finally: - session.commit() - close_session(session) - - def execute_sql_query(sql_query, schema=None): - session = 
get_session(schema=schema) - try: - records = session.execute(sql_query) - return records - finally: - close_session(session) diff --git a/syncbot/utils/db/schemas.py b/syncbot/utils/db/schemas.py deleted file mode 100644 index b02984a..0000000 --- a/syncbot/utils/db/schemas.py +++ /dev/null @@ -1,80 +0,0 @@ -import sqlalchemy -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy import Column, ForeignKey, Integer, String -from sqlalchemy.types import DECIMAL -from sqlalchemy.orm import relationship - -BaseClass = declarative_base(mapper=sqlalchemy.orm.mapper) - - -class GetDBClass: - def get_id(self): - return self.id - - def get(self, attr): - if attr in [c.key for c in self.__table__.columns]: - return getattr(self, attr) - return None - - def to_json(self): - return {c.key: self.get(c.key) for c in self.__table__.columns} - - def __repr__(self): - return str(self.to_json()) - - -class Region(BaseClass, GetDBClass): - __tablename__ = "regions" - id = Column(Integer, primary_key=True) - team_id = Column(String(100), unique=True) - workspace_name = Column(String(100)) - bot_token = Column(String(100)) - - def get_id(): - return Region.team_id - - -class Sync(BaseClass, GetDBClass): - __tablename__ = "syncs" - id = Column(Integer, primary_key=True) - title = Column(String(100), unique=True) - description = Column(String(100)) - - def get_id(): - return Sync.id - - -class SyncChannel(BaseClass, GetDBClass): - __tablename__ = "sync_channels" - id = Column(Integer, primary_key=True) - sync_id = Column(Integer, ForeignKey("syncs.id")) - region_id = Column(Integer, ForeignKey("regions.id")) - region = relationship("Region", backref="sync_channels") - channel_id = Column(String(100)) - - def get_id(): - return SyncChannel.channel_id - - -class PostMeta(BaseClass, GetDBClass): - __tablename__ = "post_meta" - id = Column(Integer, primary_key=True) - post_id = Column(String(100)) - sync_channel_id = Column(Integer, ForeignKey("sync_channels.id")) - ts = 
Column(DECIMAL(16, 6)) - - def get_id(): - return PostMeta.post_id - - -# class SyncChannelExtended(BaseClass, GetDBClass): -# __tablename__ = "sync_channels_extended" -# id = Column(Integer, primary_key=True) -# sync_id = Column(Integer) -# region_id = Column(Integer) -# channel_id = Column(String(100)) -# sync_title = Column(String(100)) -# sync_description = Column(String(100)) -# region_team_id = Column(String(100)) -# region_workspace_name = Column(String(100)) -# region_bot_token = Column(String(100)) diff --git a/syncbot/utils/handlers.py b/syncbot/utils/handlers.py deleted file mode 100644 index cc365c3..0000000 --- a/syncbot/utils/handlers.py +++ /dev/null @@ -1,342 +0,0 @@ -import os - -# import time -import uuid -from logging import Logger - -from slack_sdk.web import WebClient -from utils import builders, constants, helpers -from utils.db import DbManager, schemas -from utils.slack import actions, forms, orm - - -def handle_remove_sync( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -): - """Handles the "DeSync" button action by removing the SyncChannel record from the database. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - sync_channel_id = int(helpers.safe_get(body, "actions", 0, "value")) - sync_channel_record = DbManager.get_record(schemas.SyncChannel, id=sync_channel_id) - DbManager.delete_records(schemas.SyncChannel, [schemas.SyncChannel.id == sync_channel_id]) - try: - client.conversations_leave(channel=sync_channel_record.channel_id) - except Exception: - pass - builders.build_config_form(body, client, logger, context) - - -def respond_to_message_event( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Main function for handling message events. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. 
- logger (Logger): Logger object. - context (dict): Context object. - """ - event_type = helpers.safe_get(body, "event", "type") - event_subtype = helpers.safe_get(body, "event", "subtype") - message_subtype = helpers.safe_get(body, "event", "message", "subtype") or helpers.safe_get( - body, "event", "previous_message", "subtype" - ) - team_id = helpers.safe_get(body, "team_id") - channel_id = helpers.safe_get(body, "event", "channel") - msg_text = helpers.safe_get(body, "event", "text") or helpers.safe_get(body, "event", "message", "text") - msg_text = " " if (msg_text or "") == "" else msg_text - mentioned_users = helpers.parse_mentioned_users(msg_text, client) - user_id = helpers.safe_get(body, "event", "user") or helpers.safe_get(body, "event", "message", "user") - thread_ts = helpers.safe_get(body, "event", "thread_ts") - ts = ( - helpers.safe_get(body, "event", "message", "ts") - or helpers.safe_get(body, "event", "previous_message", "ts") - or helpers.safe_get(body, "event", "ts") - ) - files = [ - file - for file in helpers.safe_get(body, "event", "files") - or helpers.safe_get(body, "event", "message", "files") - or [] - ] - photos = [photo for photo in files if helpers.safe_get(photo, "original_w")] - if event_subtype in ["message_changed", "message_deleted"]: - photo_names = [ - f"{photo['id']}.png" if photo['filetype'] == "heic" else f"{photo['id']}.{photo['filetype']}" - for photo in photos - ] - photo_list = [{"url": f"{constants.S3_IMAGE_URL}{name}", "name": name} for name in photo_names] - else: - photo_list = helpers.upload_photos(files=photos, client=client, logger=logger) - photo_blocks = [ - orm.ImageBlock(image_url=photo["url"], alt_text=photo["name"]).as_form_field() for photo in photo_list - ] - - if (event_type == "message") and (message_subtype != "bot_message"): # and (event_context not in EVENT_LIST): - # EVENT_LIST.append(event_context) - if (not event_subtype) or (event_subtype == "file_share" and msg_text != ""): - post_list = [] - 
post_uuid = uuid.uuid4().bytes - if not thread_ts: - # handle new post - sync_records = helpers.get_sync_list(team_id, channel_id) - if not sync_records: - try: - client.chat_postMessage( - channel=channel_id, - text=":wave: Hello! I'm SyncBot. I was added to this channel, but this channel doesn't seem to be part of a Sync. Please use the `/config-syncbot` command to configure me.", - ) - client.conversations_leave(channel=channel_id) - except Exception as e: - logger.error(e) - return - user_name, user_profile_url = helpers.get_user_info(client, user_id) - region_name = helpers.safe_get( - [record[1].workspace_name for record in sync_records if record[0].channel_id == channel_id], 0 - ) - for record in sync_records: - sync_channel, region = record - if sync_channel.channel_id == channel_id: - ts = helpers.safe_get(body, "event", "ts") - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - user_name=user_name, - user_profile_url=user_profile_url, - region_name=region_name, - blocks=photo_blocks, - ) - # if photos != []: - # time.sleep(3) # required so the next step catches the latest ts - # posts = client.conversations_history(channel=sync_channel.channel_id, limit=1) - # print(posts["messages"][0]["ts"]) - # # ts = posts["messages"][0]["ts"] - # ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - # else: - # ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - ts = helpers.safe_get(res, "ts") or helpers.safe_get(body, "event", "ts") - post_list.append( - schemas.PostMeta( - post_id=post_uuid, - sync_channel_id=sync_channel.id, - ts=float(ts), - ) - ) - for photo in photo_list: - os.remove(photo["path"]) - DbManager.create_records(post_list) - else: - # handle threaded reply - post_list = [] - post_uuid = uuid.uuid4().bytes - post_records = 
helpers.get_post_records(thread_ts) - region_name = helpers.safe_get( - [record[2].workspace_name for record in post_records if record[1].channel_id == channel_id], 0 - ) - for record in post_records: - post_meta, sync_channel, region = record - user_name, user_profile_url = helpers.get_user_info(client, user_id) - if sync_channel.channel_id == channel_id: - ts = helpers.safe_get(body, "event", "ts") - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - user_name=user_name, - user_profile_url=user_profile_url, - thread_ts="{:.6f}".format(post_meta.ts), - region_name=region_name, - blocks=photo_blocks, - ) - ts = helpers.safe_get(res, "ts") - post_list.append( - schemas.PostMeta( - post_id=post_uuid, - sync_channel_id=sync_channel.id, - ts=float(ts), - ) - ) - DbManager.create_records(post_list) - - elif event_subtype == "message_changed": - # handle edited message - post_records = helpers.get_post_records(ts) - region_name = helpers.safe_get( - [record[2].workspace_name for record in post_records if record[1].channel_id == channel_id], 0 - ) - for record in post_records: - post_meta, sync_channel, region = record - if sync_channel.channel_id == channel_id: - continue - else: - msg_text = helpers.apply_mentioned_users(msg_text, client, mentioned_users) - res = helpers.post_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - msg_text=msg_text, - update_ts="{:.6f}".format(post_meta.ts), - region_name=region_name, - blocks=photo_blocks, - ) - elif event_subtype == "message_deleted": - # handle deleted message - post_records = helpers.get_post_records(ts) - for record in post_records: - post_meta, sync_channel, region = record - if sync_channel.channel_id == channel_id: - continue - else: - res = helpers.delete_message( - bot_token=region.bot_token, - channel_id=sync_channel.channel_id, - 
ts="{:.6f}".format(post_meta.ts), - ) - - -def handle_config_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the config form submission (currently does nothing) - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - pass - - -def handle_join_sync_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the join sync form submission by appending to the SyncChannel table. - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) - sync_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_SELECT) - channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) - team_id = helpers.safe_get(body, "view", "team_id") - region_record: schemas.Region = DbManager.get_record(schemas.Region, id=team_id) - sync_record: schemas.Sync = DbManager.get_record(schemas.Sync, id=sync_id) - - channel_sync_record = schemas.SyncChannel( - sync_id=sync_id, - channel_id=channel_id, - region_id=region_record.id, - ) - try: - DbManager.create_record(channel_sync_record) - client.conversations_join(channel=channel_id) - client.chat_postMessage( - channel=channel_id, - text=f":wave: Hello! I'm SyncBot. I'll be keeping this channel in sync with *{sync_record.title}*.", - ) - except Exception: - body["error_message"] = "Your chosen channel is already part of a Sync. Please choose another channel." - - builders.build_config_form(body, client, logger, context) - - -def handle_new_sync_submission( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Handles the new sync form submission by appending to the Sync table. 
- - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - form_data = forms.NEW_SYNC_FORM.get_selected_values(body) - sync_title = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_TITLE) - sync_description = helpers.safe_get(form_data, actions.CONFIG_NEW_SYNC_DESCRIPTION) - - sync_record = schemas.Sync( - title=sync_title, - description=sync_description, - ) - DbManager.create_record(sync_record) - - -def check_join_sync_channel( - body: dict, - client: WebClient, - logger: Logger, - context: dict, -) -> None: - """Checks to see if the chosen channel id is already part of a sync - - Args: - body (dict): Event body from the invocation. - client (WebClient): Slack WebClient object. - logger (Logger): Logger object. - context (dict): Context object. - """ - view_id = helpers.safe_get(body, "view", "id") - form_data = forms.JOIN_SYNC_FORM.get_selected_values(body) - channel_id = helpers.safe_get(form_data, actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT) - blocks = helpers.safe_get(body, "view", "blocks") - already_warning = constants.WARNING_BLOCK in [block["block_id"] for block in blocks] - sync_channel_records = DbManager.find_records(schemas.SyncChannel, [schemas.SyncChannel.channel_id == channel_id]) - - if len(sync_channel_records) > 0 and not already_warning: - block = orm.SectionBlock( - action=constants.WARNING_BLOCK, - label=":warning: :warning: This channel is already part of a Sync! Please choose another channel.", - ).as_form_field() - print(block) - blocks.append( - orm.SectionBlock( - action=constants.WARNING_BLOCK, - label=":warning: :warning: This channel is already part of a Sync! 
Please choose another channel.", - ).as_form_field() - ) - helpers.update_modal( - blocks=blocks, - client=client, - view_id=view_id, - title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - ) - elif len(sync_channel_records) == 0 and already_warning: - blocks = [block for block in blocks if block["block_id"] != constants.WARNING_BLOCK] - helpers.update_modal( - blocks=blocks, - client=client, - view_id=view_id, - title_text="Join Sync", - callback_id=actions.CONFIG_JOIN_SYNC_SUMBIT, - ) diff --git a/syncbot/utils/helpers.py b/syncbot/utils/helpers.py deleted file mode 100644 index 1e3697c..0000000 --- a/syncbot/utils/helpers.py +++ /dev/null @@ -1,313 +0,0 @@ -import json -import os -import re -from logging import Logger -from typing import Dict, List, Tuple - -import boto3 -import requests -import slack_sdk -from PIL import Image -from pillow_heif import register_heif_opener -from slack_bolt.adapter.aws_lambda.lambda_s3_oauth_flow import LambdaS3OAuthFlow -from slack_bolt.oauth.oauth_settings import OAuthSettings -from slack_sdk import WebClient -from utils import constants -from utils.db import DbManager, schemas -from utils.slack import actions - -register_heif_opener() - - -def get_oauth_flow(): - if constants.LOCAL_DEVELOPMENT: - return None - else: - return LambdaS3OAuthFlow( - oauth_state_bucket_name=os.environ[constants.SLACK_STATE_S3_BUCKET_NAME], - installation_bucket_name=os.environ[constants.SLACK_INSTALLATION_S3_BUCKET_NAME], - settings=OAuthSettings( - client_id=os.environ[constants.SLACK_CLIENT_ID], - client_secret=os.environ[constants.SLACK_CLIENT_SECRET], - scopes=os.environ[constants.SLACK_SCOPES].split(","), - ), - ) - - -def safe_get(data, *keys): - if not data: - return None - try: - result = data - for k in keys: - if isinstance(k, int) and isinstance(result, list): - result = result[k] - elif result.get(k): - result = result[k] - else: - return None - return result - except KeyError: - return None - - -def 
get_sync_list(team_id: str, channel_id: str) -> List[Tuple[schemas.SyncChannel, schemas.Region]]: - sync_channel_record = DbManager.find_records(schemas.SyncChannel, [schemas.SyncChannel.channel_id == channel_id]) - if sync_channel_record: - sync_channels = DbManager.find_join_records2( - left_cls=schemas.SyncChannel, - right_cls=schemas.Region, - filters=[schemas.SyncChannel.sync_id == sync_channel_record[0].sync_id], - ) - else: - sync_channels = [] - return sync_channels - - -def get_user_info(client: WebClient, user_id: str) -> Tuple[str, str]: - try: - res = client.users_info(user=user_id) - except slack_sdk.errors.SlackApiError: - return None, None - - user_name = ( - safe_get(res, "user", "profile", "display_name") or safe_get(res, "user", "profile", "real_name") or None - ) - user_profile_url = safe_get(res, "user", "profile", "image_192") - return user_name, user_profile_url - - -def post_message( - bot_token: str, - channel_id: str, - msg_text: str, - user_name: str = None, - user_profile_url: str = None, - thread_ts: str = None, - update_ts: str = None, - region_name: str = None, - blocks: List[dict] = None, -) -> Dict: - slack_client = WebClient(bot_token) - posted_from = f"({region_name})" if region_name else "(via SyncBot)" - if blocks: - # msg_block = orm.SectionBlock(label=msg_text).as_form_field() - msg_block = {"type": "section", "text": {"type": "mrkdwn", "text": msg_text}} - all_blocks = [msg_block] + blocks - else: - all_blocks = [] - if update_ts: - res = slack_client.chat_update( - channel=channel_id, - text=msg_text, - ts=update_ts, - blocks=all_blocks, - ) - else: - res = slack_client.chat_postMessage( - channel=channel_id, - text=msg_text, - username=f"{user_name} {posted_from}", - icon_url=user_profile_url, - thread_ts=thread_ts, - blocks=all_blocks, - ) - return res - - -def get_post_records(thread_ts: str) -> List[Tuple[schemas.PostMeta, schemas.SyncChannel, schemas.Region]]: - post = DbManager.find_records(schemas.PostMeta, 
[schemas.PostMeta.ts == float(thread_ts)]) - if post: - post_records = DbManager.find_join_records3( - left_cls=schemas.PostMeta, - right_cls1=schemas.SyncChannel, - right_cls2=schemas.Region, - filters=[schemas.PostMeta.post_id == post[0].post_id], - ) - else: - post_records = [] - return post_records - - -def delete_message(bot_token: str, channel_id: str, ts: str) -> Dict: - slack_client = WebClient(bot_token) - res = slack_client.chat_delete( - channel=channel_id, - ts=ts, - ) - return res - - -def get_request_type(body: dict) -> tuple[str]: - request_type = safe_get(body, "type") - if request_type == "event_callback": - return ("event_callback", safe_get(body, "event", "type")) - elif request_type == "block_actions": - block_action = safe_get(body, "actions", 0, "action_id") - if block_action[: len(actions.CONFIG_REMOVE_SYNC)] == actions.CONFIG_REMOVE_SYNC: - block_action = actions.CONFIG_REMOVE_SYNC - return ("block_actions", block_action) - elif request_type == "view_submission": - return ("view_submission", safe_get(body, "view", "callback_id")) - elif not request_type and "command" in body: - return ("command", safe_get(body, "command")) - else: - return ("unknown", "unknown") - - -def get_region_record(team_id: str, body: dict, context: dict, client: WebClient) -> schemas.Region: - region_record: schemas.Region = DbManager.get_record(schemas.Region, id=team_id) - team_domain = safe_get(body, "team", "domain") - - if not region_record: - try: - team_info = client.team_info() - team_name = team_info["team"]["name"] - except Exception: - team_name = team_domain - region_record: schemas.Region = DbManager.create_record( - schemas.Region( - team_id=team_id, - workspace_name=team_name, - bot_token=context["bot_token"], - ) - ) - - return region_record - - -def update_modal( - blocks: List[dict], - client: WebClient, - view_id: str, - title_text: str, - callback_id: str, - submit_button_text: str = "Submit", - parent_metadata: dict = None, - close_button_text: 
str = "Close", - notify_on_close: bool = False, -): - view = { - "type": "modal", - "callback_id": callback_id, - "title": {"type": "plain_text", "text": title_text}, - "submit": {"type": "plain_text", "text": submit_button_text}, - "close": {"type": "plain_text", "text": close_button_text}, - "notify_on_close": notify_on_close, - "blocks": blocks, - } - if parent_metadata: - view["private_metadata"] = json.dumps(parent_metadata) - - client.views_update(view_id=view_id, view=view) - - -def upload_photos(files: List[dict], client: WebClient, logger: Logger) -> List[dict]: - uploaded_photos = [] - photos = [file for file in files if file["mimetype"][:5] == "image"] - for photo in photos: - try: - # Download photo - # Try to get a medium size photo first, then fallback to smaller sizes - r = requests.get( - photo.get("thumb_480") or photo.get("thumb_360") or photo.get("thumb_80") or photo.get("url_private"), - headers={"Authorization": f"Bearer {client.token}"}, - ) - r.raise_for_status() - - file_name = f"{photo['id']}.{photo['filetype']}" - file_path = f"/tmp/{file_name}" - file_mimetype = photo["mimetype"] - - # Save photo to disk - with open(file_path, "wb") as f: - f.write(r.content) - - # Convert HEIC to PNG - if photo["filetype"] == "heic": - heic_img = Image.open(file_path) - x, y = heic_img.size - coeff = min(constants.MAX_HEIF_SIZE / max(x, y), 1) - heic_img = heic_img.resize((int(x * coeff), int(y * coeff))) - heic_img.save(file_path.replace(".heic", ".png"), quality=95, optimize=True, format="PNG") - os.remove(file_path) - - file_path = file_path.replace(".heic", ".png") - file_name = file_name.replace(".heic", ".png") - file_mimetype = "image/png" - - # Upload photo to S3 - if constants.LOCAL_DEVELOPMENT: - s3_client = boto3.client( - "s3", - aws_access_key_id=os.environ[constants.AWS_ACCESS_KEY_ID], - aws_secret_access_key=os.environ[constants.AWS_SECRET_ACCESS_KEY], - ) - else: - s3_client = boto3.client("s3") - - with open(file_path, "rb") as f: - 
s3_client.upload_fileobj( - f, constants.S3_IMAGE_BUCKET, file_name, ExtraArgs={"ContentType": file_mimetype} - ) - uploaded_photos.append( - { - "url": f"{constants.S3_IMAGE_URL}{file_name}", - "name": file_name, - "path": file_path, - } - ) - except Exception as e: - logger.error(f"Error uploading file: {e}") - return uploaded_photos - - -def parse_mentioned_users(msg_text: str, client: WebClient) -> List[Dict]: - - user_ids = re.findall(r"<@(\w+)>", msg_text or "") - - if user_ids != []: - try: - members = client.users_list()["members"] - except slack_sdk.errors.SlackApiError: - # TODO: rate limited, use client.user_info() to get individual user info - members = [] - member_dict = {} - for member in members: - user_name = ( - member["profile"]["real_name"] - if member["profile"]["display_name"] != "" - else member["profile"]["display_name"] - ) - member_dict.update({member["id"]: {"user_name": user_name, "email": safe_get(member, "profile", "email")}}) - - return [member_dict[user_id] for user_id in user_ids] - - -def apply_mentioned_users(msg_text: str, client: WebClient, mentioned_user_info: List[Dict]) -> List[Dict]: - - email_list = [user["email"] for user in mentioned_user_info] - msg_text = msg_text or "" - - if email_list == []: - return msg_text - else: - try: - members = client.users_list()["members"] - except slack_sdk.errors.SlackApiError: - # TODO: rate limited, use client.user_info() to get individual user info - members = [] - member_dict = { - member["profile"].get("email"): member["id"] for member in members if member["profile"].get("email") - } - - replace_list = [] - for index, email in enumerate(email_list): - user_id = member_dict.get(email) - if user_id: - replace_list.append(f"<@{user_id}>") - else: - replace_list.append(f"@{mentioned_user_info[index]['user_name']}") - - pattern = r"<@\w+>" - return re.sub(pattern, "{}", msg_text).format(*replace_list) diff --git a/syncbot/utils/routing.py b/syncbot/utils/routing.py deleted file mode 100644 
index 5ee474c..0000000 --- a/syncbot/utils/routing.py +++ /dev/null @@ -1,31 +0,0 @@ -from utils import announcements, builders, handlers -from utils.slack import actions - -COMMAND_MAPPER = { - "/config-syncbot": builders.build_config_form, - "/send-syncbot-announcement": announcements.send, -} - -ACTION_MAPPER = { - actions.CONFIG_JOIN_EXISTING_SYNC: builders.build_join_sync_form, - actions.CONFIG_CREATE_NEW_SYNC: builders.build_new_sync_form, - actions.CONFIG_REMOVE_SYNC: handlers.handle_remove_sync, - actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: handlers.check_join_sync_channel, -} - -EVENT_MAPPER = { - "message": handlers.respond_to_message_event, -} - -VIEW_MAPPER = { - actions.CONFIG_FORM_SUBMIT: handlers.handle_config_submission, - actions.CONFIG_JOIN_SYNC_SUMBIT: handlers.handle_join_sync_submission, - actions.CONFIG_NEW_SYNC_SUBMIT: handlers.handle_new_sync_submission, -} - -MAIN_MAPPER = { - "command": COMMAND_MAPPER, - "block_actions": ACTION_MAPPER, - "event_callback": EVENT_MAPPER, - "view_submission": VIEW_MAPPER, -} diff --git a/syncbot/utils/slack/__init__.py b/syncbot/utils/slack/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/syncbot/utils/slack/actions.py b/syncbot/utils/slack/actions.py deleted file mode 100644 index 1099ac7..0000000 --- a/syncbot/utils/slack/actions.py +++ /dev/null @@ -1,12 +0,0 @@ -CONFIG_JOIN_EXISTING_SYNC = "join_existing_sync" -CONFIG_CREATE_NEW_SYNC = "create_new_sync" -CONFIG_REMOVE_SYNC = "remove_sync" - -CONFIG_FORM_SUBMIT = "config_form_submit" - -CONFIG_NEW_SYNC_TITLE = "config_new_sync_title" -CONFIG_NEW_SYNC_DESCRIPTION = "config_new_sync_description" -CONFIG_NEW_SYNC_SUBMIT = "config_new_sync_submit" -CONFIG_JOIN_SYNC_SELECT = "config_join_sync_select" -CONFIG_JOIN_SYNC_CHANNEL_SELECT = "config_join_sync_channel_select" -CONFIG_JOIN_SYNC_SUMBIT = "config_join_sync_submit" diff --git a/syncbot/utils/slack/forms.py b/syncbot/utils/slack/forms.py deleted file mode 100644 index c810376..0000000 
--- a/syncbot/utils/slack/forms.py +++ /dev/null @@ -1,86 +0,0 @@ -from typing import List -from utils.db import schemas -from utils.slack import orm, actions - -CONFIG_FORM = orm.BlockView( - blocks=[ - orm.ActionsBlock( - elements=[ - orm.ButtonElement( - label="Join existing Sync", - action=actions.CONFIG_JOIN_EXISTING_SYNC, - ), - orm.ButtonElement( - label="Create new Sync", - action=actions.CONFIG_CREATE_NEW_SYNC, - ), - ] - ), - orm.DividerBlock(), - ] -) - -NEW_SYNC_FORM = orm.BlockView( - blocks=[ - orm.InputBlock( - label="Sync Title", - action=actions.CONFIG_NEW_SYNC_TITLE, - element=orm.PlainTextInputElement(placeholder="Enter a title for this Sync"), - optional=False, - ), - orm.InputBlock( - label="Sync Description", - action=actions.CONFIG_NEW_SYNC_DESCRIPTION, - element=orm.PlainTextInputElement(placeholder="Enter a description for this Sync"), - optional=False, - ), - orm.ContextBlock( - element=orm.ContextElement( - initial_value="Reminder: this form is for creating NEW Syncs. If the Sync has already been set up " - "in another region, please use the 'Join existing Sync' button to join it.", - ), - ), - ] -) - -JOIN_SYNC_FORM = orm.BlockView( - blocks=[ - orm.InputBlock( - label="Sync Select", - action=actions.CONFIG_JOIN_SYNC_SELECT, - element=orm.StaticSelectElement(placeholder="Select a Sync to join"), - optional=False, - ), - orm.InputBlock( - label="Sync Channel Select", - action=actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT, - element=orm.ChannelsSelectElement(placeholder="Select a channel to use for this Sync"), - optional=False, - dispatch_action=True, - ), - ] -) - - -def build_config_form_sync_block(sync_channel: schemas.SyncChannel, sync: schemas.Sync) -> List[orm.BaseBlock]: - """Function to build a block for a sync channel. - - Args: - sync_channel (orm.SyncChannel): SyncChannel database record. - sync (orm.Sync): Sync database record. - - Returns: - List[orm.BaseBlock]: List of blocks to be appended to the config form. 
- """ - return [ - orm.SectionBlock( - label=f"*{sync.title}*\n{sync.description}\nChannel: <#{sync_channel.channel_id}>", - action=f"{actions.CONFIG_REMOVE_SYNC}_{sync_channel.id}", - element=orm.ButtonElement( - label="DeSync", - style="danger", - value=f"{sync_channel.id}", # TODO: add confirmation block - ), - ), - orm.DividerBlock(), - ] diff --git a/template.yaml b/template.yaml deleted file mode 100644 index 1ff977f..0000000 --- a/template.yaml +++ /dev/null @@ -1,137 +0,0 @@ -AWSTemplateFormatVersion: "2010-09-09" -Transform: AWS::Serverless-2016-10-31 -Description: syncbot-build-template - -# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst -Globals: - Function: - Timeout: 10 - MemorySize: 128 - Tracing: Active - Api: - TracingEnabled: true - -Parameters: - SlackToken: - Description: Slack token that is passed by the bot - Type: String - Default: "123" - SlackSigningSecret: - Description: Auth token used to pass to the controller to get the commands - Type: String - Default: "123" - SlackClientSecret: - Description: Auth token used to pass to the controller to get the commands - Type: String - Default: "123" - SlackOauthScopes: - Description: Auth token used to pass to the controller to get the commands - Type: String - Default: "app_mentions:read,channels:history,channels:join,chat:write,chat:write.customize,commands,files:read,files:write,team:read,users:read,channels:manage,users:read.email,reactions:read,reactions:write" - Stage: - Description: Parameter for getting the deployment stage - Type: String - Default: staging - DatabaseHost: - Description: RDS Database Host - Type: String - Default: "123" - DatabaseUser: - Description: RDS Database User - Type: String - Default: "moneyball" - DatabasePassword: - Description: RDS Database Password - Type: String - Default: "123" - DatabaseSchema: - Description: RDS Database Schema - Type: String - Default: "syncbot" - PasswordEncryptKey: - 
Description: Hash encrypt key for decrypting email passwords - Type: String - Default: "123" - -Mappings: - StagesMap: - staging: - SlackClientID: "1966318390773.6037875913205" - SlackStateS3Bucket: "slack-state-bucket" - SlackInstallS3Bucket: "slack-installation-bucket" - KeepWarmName: "SyncBotKeepWarmTest" - prod: - SlackClientID: "1990266264068.6053437451057" - SlackStateS3Bucket: "slack-state-bucket" - SlackInstallS3Bucket: "slack-installation-bucket" - KeepWarmName: "SyncBotKeepWarmProd" - -Resources: - SyncBotFunction: - Type: AWS::Serverless::Function # More info about Function Resource: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#awsserverlessfunction - Properties: - CodeUri: syncbot/ - Handler: app.handler - Runtime: python3.11 - Architectures: - - x86_64 - Policies: - - AmazonS3FullAccess - - AWSLambdaRole - - AmazonEventBridgeFullAccess - Timeout: 400 - Events: - SyncBot: - Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api - Properties: - Path: /slack/events - Method: post - SyncBotInstall: - Type: Api # More info about API Event Source: https://github.com/awslabs/serverless-application-model/blob/master/versions/2016-10-31.md#api - Properties: - Path: /slack/install - Method: get - SyncBotKeepWarm: - Type: ScheduleV2 - Properties: - ScheduleExpression: "rate(5 minutes)" - Name: !FindInMap - - StagesMap - - Ref: Stage - - KeepWarmName - Environment: - Variables: - SLACK_BOT_TOKEN: !Ref SlackToken - SLACK_SIGNING_SECRET: !Ref SlackSigningSecret - ENV_SLACK_CLIENT_SECRET: !Ref SlackClientSecret - ENV_SLACK_SCOPES: !Ref SlackOauthScopes - DATABASE_HOST: !Ref DatabaseHost - ADMIN_DATABASE_USER: !Ref DatabaseUser - ADMIN_DATABASE_PASSWORD: !Ref DatabasePassword - ADMIN_DATABASE_SCHEMA: !Ref DatabaseSchema - ENV_SLACK_CLIENT_ID: !FindInMap - - StagesMap - - Ref: Stage - - SlackClientID - ENV_SLACK_STATE_S3_BUCKET_NAME: 
!FindInMap - - StagesMap - - Ref: Stage - - SlackStateS3Bucket - ENV_SLACK_INSTALLATION_S3_BUCKET_NAME: !FindInMap - - StagesMap - - Ref: Stage - - SlackInstallS3Bucket - -Outputs: - # ServerlessRestApi is an implicit API created out of Events key under Serverless::Function - # Find out more about other implicit resources you can reference within SAM - # https://github.com/awslabs/serverless-application-model/blob/master/docs/internals/generated_resources.rst#api - SyncBotApi: - Description: API Gateway endpoint URL for Prod stage for SyncBot function - Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/slack/events/" - SyncBotFunction: - Description: SyncBot Lambda Function ARN - Value: !GetAtt SyncBotFunction.Arn - SyncBotFunctionIamRole: - Description: Implicit IAM Role created for Hello World function - Value: !GetAtt SyncBotFunctionRole.Arn diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..ac90161 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,12 @@ +"""Pytest configuration: default DB backend for unit tests (no live DB required).""" + +import os + +# In-memory SQLite so importing `app` (which calls initialize_database) works without MySQL. 
+os.environ.setdefault("DATABASE_BACKEND", "sqlite") +os.environ.setdefault("DATABASE_URL", "sqlite:///:memory:") +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") diff --git a/tests/test_app_main_response.py b/tests/test_app_main_response.py new file mode 100644 index 0000000..359379b --- /dev/null +++ b/tests/test_app_main_response.py @@ -0,0 +1,120 @@ +"""Unit tests for syncbot.app.view_ack and main_response (ack + lazy work).""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +import app as app_module # noqa: E402 +from slack import actions # noqa: E402 + + +def _body_view_submit(callback_id: str) -> dict: + return { + "type": "view_submission", + "team_id": "T001", + "view": {"callback_id": callback_id}, + } + + +class TestViewAck: + """Production ``view_ack``: deferred views get custom ack kwargs.""" + + def test_returns_dict_uses_ack_kwargs(self): + ack = MagicMock() + context: dict = {} + + def ack_handler(b, c, ctx): + return { + "response_action": "errors", + "errors": {actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: "bad"}, + } + + custom = {actions.CONFIG_BACKUP_RESTORE_SUBMIT: ack_handler} + with patch.object(app_module, "VIEW_ACK_MAPPER", custom): + app_module.view_ack( + _body_view_submit(actions.CONFIG_BACKUP_RESTORE_SUBMIT), + MagicMock(), + MagicMock(), + ack, + context, + ) + + ack.assert_called_once() + assert ack.call_args.kwargs["response_action"] == "errors" + assert "errors" in ack.call_args.kwargs + + def test_returns_none_calls_empty_ack(self): + ack = MagicMock() + 
context: dict = {} + + def ack_handler(b, c, ctx): + return None + + custom = {actions.CONFIG_PUBLISH_MODE_SUBMIT: ack_handler} + with patch.object(app_module, "VIEW_ACK_MAPPER", custom): + app_module.view_ack( + _body_view_submit(actions.CONFIG_PUBLISH_MODE_SUBMIT), + MagicMock(), + MagicMock(), + ack, + context, + ) + + ack.assert_called_once_with() + + def test_unknown_callback_calls_empty_ack(self): + ack = MagicMock() + context: dict = {} + with patch.object(app_module, "VIEW_ACK_MAPPER", {}): + app_module.view_ack(_body_view_submit("unknown_callback"), MagicMock(), MagicMock(), ack, context) + ack.assert_called_once_with() + + +class TestMainResponseLocalDevViewSubmission: + """With LOCAL_DEVELOPMENT, main_response runs ack + work in one call.""" + + @patch.object(app_module, "LOCAL_DEVELOPMENT", True) + def test_non_deferred_ack_before_handler(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + assert ack.call_count == 1 + return None + + cid = actions.CONFIG_NEW_SYNC_SUBMIT + custom = {cid: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + ): + app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) + + ack.assert_called_once_with() + + +class TestMainResponseProdViewSubmission: + """Production main_response (lazy): does not call ack for view_submission.""" + + @patch.object(app_module, "LOCAL_DEVELOPMENT", False) + def test_view_submission_skips_ack_in_main_response(self): + ack = MagicMock() + context: dict = {} + + def handler(b, c, log, ctx): + return None + + cid = actions.CONFIG_NEW_SYNC_SUBMIT + custom = {cid: handler} + with ( + patch.object(app_module, "MAIN_MAPPER", {"view_submission": custom}), + patch.object(app_module, "emit_metric"), + ): + app_module.main_response(_body_view_submit(cid), MagicMock(), MagicMock(), ack, context) + + ack.assert_not_called() diff --git 
a/tests/test_app_registration.py b/tests/test_app_registration.py new file mode 100644 index 0000000..1d111cc --- /dev/null +++ b/tests/test_app_registration.py @@ -0,0 +1,46 @@ +"""Guardrails: Slack Bolt listener wiring for view ack + lazy main_response.""" + +from pathlib import Path + + +def test_app_py_view_listener_has_ack_and_lazy_in_prod_branch(): + """Production registers view with view_ack + lazy main_response.""" + root = Path(__file__).resolve().parents[1] + app_py = root / "syncbot" / "app.py" + text = app_py.read_text(encoding="utf-8") + assert "ack=view_ack" in text + assert "lazy=[main_response]" in text + assert "app.event(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text + assert "app.action(MATCH_ALL_PATTERN)(*ARGS, **LAZY_KWARGS)" in text + + +def test_bolt_view_listener_uses_view_ack_when_not_local_dev(): + """Bolt view listener should use view_ack as ack_function when not LOCAL_DEVELOPMENT.""" + import app as app_module + + if app_module.LOCAL_DEVELOPMENT: + return + bolt_app = app_module.app + view_ack_listeners = [ + li + for li in bolt_app._listeners + if getattr(li.ack_function, "__name__", None) == "view_ack" + and li.lazy_functions + and any(getattr(f, "__name__", None) == "main_response" for f in li.lazy_functions) + ] + assert view_ack_listeners, "expected view listener with ack_function=view_ack and lazy main_response" + + +def test_bolt_event_or_action_uses_lazy_main_response_in_prod_mode(): + """When not LOCAL_DEVELOPMENT, event/action listeners should defer work to lazy main_response.""" + import app as app_module + + if app_module.LOCAL_DEVELOPMENT: + return + bolt_app = app_module.app + lazy = [ + li + for li in bolt_app._listeners + if li.lazy_functions and any(getattr(f, "__name__", None) == "main_response" for f in li.lazy_functions) + ] + assert lazy, "expected lazy listeners with main_response when LOCAL_DEVELOPMENT is false" diff --git a/tests/test_channel_sync_handlers.py b/tests/test_channel_sync_handlers.py new file mode 
100644 index 0000000..6f9747e --- /dev/null +++ b/tests/test_channel_sync_handlers.py @@ -0,0 +1,145 @@ +"""Focused unit tests for channel sync handler branches.""" + +import os +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.channel_sync import ( # noqa: E402 + handle_publish_channel_submit_ack, + handle_publish_mode_submit_ack, + handle_subscribe_channel_submit, +) + + +class TestPublishModeSubmitAck: + def test_missing_group_id_logs_warning(self): + client = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + body = {"view": {"team_id": "T1", "private_metadata": "{}"}} + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync._logger.warning") as warn_log, + ): + result = handle_publish_mode_submit_ack(body, client, context) + + assert result is None + assert warn_log.call_args is not None + assert "publish_mode_submit: missing group_id in metadata" in warn_log.call_args.args[0] + + +class TestPublishChannelSubmitAck: + def test_missing_group_id_exits_early(self): + client = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + result = handle_publish_channel_submit_ack({}, client, context) + + assert result is None + create_record.assert_not_called() + + def test_missing_channel_selection_returns_ack_error(self): + client = 
MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"group_id": 7}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="__none__"), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + result = handle_publish_channel_submit_ack({}, client, context) + + assert result is not None + assert result["response_action"] == "errors" + assert "Select a Channel to publish." in result["errors"].values() + create_record.assert_not_called() + + def test_existing_sync_channel_returns_ack_error(self): + client = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"group_id": 7}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="C123"), + patch("handlers.channel_sync.DbManager.find_records", return_value=[object()]), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + result = handle_publish_channel_submit_ack({}, client, context) + + assert result is not None + assert result["response_action"] == "errors" + assert "already being synced" in next(iter(result["errors"].values())) + create_record.assert_not_called() + + +class TestSubscribeChannelSubmit: + def test_missing_sync_id_exits_early(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={}), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_subscribe_channel_submit({}, client, 
logger, context) + + create_record.assert_not_called() + + def test_missing_channel_selection_exits_early(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"sync_id": 55}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="__none__"), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + ): + handle_subscribe_channel_submit({}, client, logger, context) + + create_record.assert_not_called() + + def test_duplicate_channel_skips_join_and_create(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10) + sync_record = SimpleNamespace(group_id=None) + + with ( + patch("handlers.channel_sync._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.channel_sync._parse_private_metadata", return_value={"sync_id": 55}), + patch("handlers.channel_sync._get_selected_conversation_or_option", return_value="Cdup"), + patch("handlers.channel_sync.DbManager.get_record", return_value=sync_record), + patch("handlers.channel_sync.DbManager.find_records", return_value=[object()]), + patch("handlers.channel_sync.DbManager.create_record") as create_record, + patch("handlers.channel_sync.builders.refresh_home_tab_for_workspace") as refresh_home, + ): + handle_subscribe_channel_submit({"user": {"id": "U1"}}, client, logger, context) + + create_record.assert_not_called() + client.conversations_join.assert_not_called() + refresh_home.assert_called_once() diff --git a/tests/test_container_http_server.py b/tests/test_container_http_server.py new file mode 100644 index 0000000..6de9b22 --- /dev/null +++ b/tests/test_container_http_server.py @@ -0,0 +1,54 @@ +"""Tests for Cloud Run / container HTTP server helpers in ``app``.""" + +import json 
+import os +import socket +import threading +import time +import urllib.error +import urllib.request +from unittest.mock import patch + +import pytest + + +def test_http_listen_port_from_env() -> None: + from app import _http_listen_port + + with patch.dict(os.environ, {"PORT": "8080"}): + assert _http_listen_port() == 8080 + + +def test_http_listen_port_invalid_falls_back() -> None: + from app import _http_listen_port + + with patch.dict(os.environ, {"PORT": "nope"}): + assert _http_listen_port() == 3000 + + +def test_health_endpoint_on_container_server() -> None: + """GET ``/health`` returns 200 and JSON (same server path as Cloud Run).""" + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.bind(("127.0.0.1", 0)) + port = sock.getsockname()[1] + sock.close() + + def serve() -> None: + from app import run_syncbot_http_server + + run_syncbot_http_server(port=port, http_server_logger_enabled=False) + + threading.Thread(target=serve, daemon=True).start() + + url = f"http://127.0.0.1:{port}/health" + last_err: BaseException | None = None + for _ in range(100): + try: + with urllib.request.urlopen(url, timeout=0.3) as r: + assert r.status == 200 + assert json.loads(r.read().decode()) == {"status": "ok"} + return + except (urllib.error.URLError, OSError) as e: + last_err = e + time.sleep(0.05) + pytest.fail(f"/health never became ready: {last_err!r}") diff --git a/tests/test_db.py b/tests/test_db.py new file mode 100644 index 0000000..e7d09af --- /dev/null +++ b/tests/test_db.py @@ -0,0 +1,253 @@ +"""Unit tests for ``syncbot/db`` connection pooling, retry logic, and backend parity (MySQL/SQLite).""" + +import contextlib +import os +from unittest.mock import patch + +import pytest + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from sqlalchemy 
import inspect +from sqlalchemy.exc import OperationalError + +from db import _MAX_RETRIES, _with_retry + +# ----------------------------------------------------------------------- +# _with_retry decorator +# ----------------------------------------------------------------------- + + +class TestWithRetry: + def test_success_no_retry(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + return "ok" + + assert fn() == "ok" + assert call_count == 1 + + def test_retries_on_operational_error(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + if call_count <= _MAX_RETRIES: + raise OperationalError("statement", {}, Exception("connection lost")) + return "recovered" + + assert fn() == "recovered" + assert call_count == _MAX_RETRIES + 1 + + def test_exhausts_retries_raises(self): + @_with_retry + def fn(): + raise OperationalError("statement", {}, Exception("connection lost")) + + with pytest.raises(OperationalError): + fn() + + def test_non_operational_error_not_retried(self): + call_count = 0 + + @_with_retry + def fn(): + nonlocal call_count + call_count += 1 + raise ValueError("not a db error") + + with pytest.raises(ValueError): + fn() + assert call_count == 1 + + +# ----------------------------------------------------------------------- +# Engine creation uses QueuePool +# ----------------------------------------------------------------------- + + +class TestEngineConfig: + @patch.dict( + os.environ, + { + "DATABASE_BACKEND": "mysql", + "DATABASE_HOST": "localhost", + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "test", + "DATABASE_SCHEMA": "syncbot", + }, + clear=False, + ) + def test_engine_uses_queue_pool_mysql(self): + from sqlalchemy.pool import QueuePool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = 
get_engine(schema="test_schema_unique") + assert isinstance(engine.pool, QueuePool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + + @patch.dict( + os.environ, + { + "DATABASE_BACKEND": "postgresql", + "DATABASE_HOST": "localhost", + "DATABASE_USER": "root", + "DATABASE_PASSWORD": "test", + "DATABASE_SCHEMA": "syncbot", + }, + clear=False, + ) + def test_engine_uses_queue_pool_postgresql(self): + from sqlalchemy.pool import QueuePool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = get_engine(schema="test_schema_unique_pg") + assert isinstance(engine.pool, QueuePool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + + @patch.dict( + os.environ, + { + "DATABASE_BACKEND": "sqlite", + "DATABASE_URL": "sqlite:///:memory:", + }, + clear=False, + ) + def test_engine_uses_null_pool_sqlite(self): + from sqlalchemy.pool import NullPool + + import db as db_mod + from db import get_engine + + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + engine = None + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + engine = get_engine() + assert isinstance(engine.pool, NullPool) + finally: + if engine: + engine.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + + +# ----------------------------------------------------------------------- +# Backend parity: SQLite bootstrap and required vars +# ----------------------------------------------------------------------- + + +class TestBackendParity: + @pytest.mark.parametrize("sqlite_url", ["sqlite:///test_bootstrap.db"]) + @patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False) + def test_sqlite_initialize_database_creates_tables(self, sqlite_url): + 
import db as db_mod + from db import get_engine, initialize_database + + os.environ["DATABASE_URL"] = sqlite_url + old_engine = db_mod.GLOBAL_ENGINE + old_schema = db_mod.GLOBAL_SCHEMA + try: + db_mod.GLOBAL_ENGINE = None + db_mod.GLOBAL_SCHEMA = None + initialize_database() + engine = get_engine() + insp = inspect(engine) + assert insp.has_table("workspaces") + assert insp.has_table("alembic_version") + assert insp.has_table("slack_bots") + finally: + if db_mod.GLOBAL_ENGINE: + db_mod.GLOBAL_ENGINE.dispose() + db_mod.GLOBAL_ENGINE = old_engine + db_mod.GLOBAL_SCHEMA = old_schema + if "DATABASE_URL" in os.environ and "test_bootstrap" in os.environ["DATABASE_URL"]: + with contextlib.suppress(Exception): + (__import__("pathlib").Path("test_bootstrap.db")).unlink(missing_ok=True) + + def test_get_required_db_vars_mysql_without_url(self): + with patch.dict(os.environ, {"DATABASE_BACKEND": "mysql"}, clear=False): + if "DATABASE_URL" in os.environ: + del os.environ["DATABASE_URL"] + from constants import get_required_db_vars + + required = get_required_db_vars() + assert "DATABASE_HOST" in required + assert "DATABASE_USER" in required + assert "DATABASE_PASSWORD" in required + assert "DATABASE_SCHEMA" in required + + def test_get_required_db_vars_sqlite(self): + with patch.dict(os.environ, {"DATABASE_BACKEND": "sqlite"}, clear=False): + from constants import get_required_db_vars + + required = get_required_db_vars() + assert required == ["DATABASE_URL"] + + def test_get_required_db_vars_postgresql_without_url(self): + with patch.dict( + os.environ, + {"DATABASE_BACKEND": "postgresql"}, + clear=False, + ): + if "DATABASE_URL" in os.environ: + del os.environ["DATABASE_URL"] + from constants import get_required_db_vars + + required = get_required_db_vars() + assert "DATABASE_HOST" in required + assert "DATABASE_USER" in required + assert "DATABASE_PASSWORD" in required + assert "DATABASE_SCHEMA" in required + + def test_default_database_backend_is_mysql(self): + import 
importlib + + import constants as c + + old = os.environ.pop("DATABASE_BACKEND", None) + try: + importlib.reload(c) + assert c.get_database_backend() == "mysql" + finally: + if old is not None: + os.environ["DATABASE_BACKEND"] = old + else: + os.environ.setdefault("DATABASE_BACKEND", "mysql") + importlib.reload(c) diff --git a/tests/test_db_setup.py b/tests/test_db_setup.py new file mode 100644 index 0000000..9bf794f --- /dev/null +++ b/tests/test_db_setup.py @@ -0,0 +1,94 @@ +"""Unit tests for infra/aws/db_setup/handler.py (MySQL vs PostgreSQL branches).""" + +import importlib +import sys +from unittest.mock import MagicMock, patch + +import pytest + +# handler.py does ``import psycopg2`` at the top level. The package +# (psycopg2-binary) may not ship wheels for every Python version +# (e.g. 3.14). Stub the module so the import succeeds regardless. +if "psycopg2" not in sys.modules: + _pg_stub = MagicMock() + _pg_stub.sql = MagicMock() + sys.modules["psycopg2"] = _pg_stub + sys.modules["psycopg2.sql"] = _pg_stub.sql + + +def _fresh_handler(): + """(Re-)import handler so patches take effect.""" + if "handler" in sys.modules: + return importlib.reload(sys.modules["handler"]) + import handler + + return handler + + +@pytest.fixture +def cfn_create_event(): + return { + "RequestType": "Create", + "ResponseURL": "https://example.invalid/", + "StackId": "arn:aws:cloudformation:us-east-1:123:stack/x", + "RequestId": "req", + "LogicalResourceId": "AppDbSetup", + "ResourceProperties": { + "Host": "db.example.com", + "AdminUser": "admin", + "AdminPassword": "adminpw", + "Schema": "syncbot_test", + "Stage": "test", + "SecretArn": "arn:aws:secretsmanager:us-east-1:123:secret:x", + "DatabaseEngine": "mysql", + }, + } + + +def test_handler_calls_mysql_setup(cfn_create_event): + handler = _fresh_handler() + with ( + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + 
patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, + ): + handler._handler_impl(cfn_create_event, MagicMock()) + mock_mysql.assert_called_once() + mock_pg.assert_not_called() + assert mock_send.call_args[0][2] == "SUCCESS" + + +def test_handler_delete_uses_physical_resource_id(): + """Delete must echo PhysicalResourceId from Create; never a placeholder.""" + delete_event = { + "RequestType": "Delete", + "ResponseURL": "https://example.invalid/", + "StackId": "arn:aws:cloudformation:us-east-1:123:stack/x", + "RequestId": "req", + "LogicalResourceId": "AppDbSetup", + "PhysicalResourceId": "syncbot_test", + } + handler = _fresh_handler() + with patch.object(handler, "send") as mock_send: + handler._handler_impl(delete_event, MagicMock()) + mock_send.assert_called_once() + assert mock_send.call_args[0][2] == "SUCCESS" + assert mock_send.call_args[1]["physical_resource_id"] == "syncbot_test" + + +def test_handler_calls_postgresql_setup(cfn_create_event): + cfn_create_event["ResourceProperties"]["DatabaseEngine"] = "postgresql" + handler = _fresh_handler() + with ( + patch.object(handler, "send") as mock_send, + patch.object(handler, "get_secret_value", return_value="apppw"), + patch.object(handler, "_assert_tcp_reachable"), + patch.object(handler, "setup_database_mysql") as mock_mysql, + patch.object(handler, "setup_database_postgresql") as mock_pg, + ): + handler._handler_impl(cfn_create_event, MagicMock()) + mock_pg.assert_called_once() + mock_mysql.assert_not_called() + assert mock_send.call_args[0][2] == "SUCCESS" diff --git a/tests/test_deploy_script_syntax.py b/tests/test_deploy_script_syntax.py new file mode 100644 index 0000000..b5d06f3 --- /dev/null +++ b/tests/test_deploy_script_syntax.py @@ -0,0 +1,25 @@ +"""Smoke-check deploy shell scripts parse with bash -n.""" + +import subprocess +from pathlib import Path + +import pytest + +REPO_ROOT = Path(__file__).resolve().parent.parent + 
+DEPLOY_SCRIPTS = [ + REPO_ROOT / "deploy.sh", + REPO_ROOT / "infra" / "gcp" / "scripts" / "deploy.sh", + REPO_ROOT / "infra" / "aws" / "scripts" / "deploy.sh", +] + + +@pytest.mark.parametrize("path", DEPLOY_SCRIPTS, ids=lambda p: str(p.relative_to(REPO_ROOT))) +def test_bash_syntax(path: Path) -> None: + assert path.is_file(), f"missing {path}" + subprocess.run( + ["bash", "-n", str(path)], + check=True, + capture_output=True, + text=True, + ) diff --git a/tests/test_export_import_handlers.py b/tests/test_export_import_handlers.py new file mode 100644 index 0000000..619fe24 --- /dev/null +++ b/tests/test_export_import_handlers.py @@ -0,0 +1,75 @@ +"""Focused unit tests for backup/restore and migration handler validation.""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.export_import import ( # noqa: E402 + handle_backup_restore, + handle_backup_restore_submit_ack, +) +from slack import actions # noqa: E402 + + +class TestBackupRestoreSubmitValidation: + def test_returns_error_when_file_missing(self): + client = MagicMock() + body = {"user": {"id": "U1"}, "team": {"id": "TTEST"}, "view": {"state": {"values": {}}}} + + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TTEST"}), + patch("handlers.export_import._is_admin", return_value=True), + ): + resp = handle_backup_restore_submit_ack(body, client, context={}) + + assert resp["response_action"] == "errors" + assert actions.CONFIG_BACKUP_RESTORE_JSON_INPUT in resp["errors"] + + def test_returns_error_when_uploaded_file_has_no_url(self): + client = MagicMock() + body = { + "user": {"id": "U1"}, + "team": {"id": "TTEST"}, + "view": { + "state": { + "values": { + actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: { + 
actions.CONFIG_BACKUP_RESTORE_JSON_INPUT: { + "files": [{"id": "F123"}], + } + } + } + } + }, + } + + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TTEST"}), + patch("handlers.export_import._is_admin", return_value=True), + ): + resp = handle_backup_restore_submit_ack(body, client, context={}) + + assert resp["response_action"] == "errors" + assert "Could not retrieve the uploaded file." in resp["errors"][actions.CONFIG_BACKUP_RESTORE_JSON_INPUT] + + +class TestHandleBackupRestorePrimaryWorkspace: + def test_returns_early_when_primary_mismatch(self): + client = MagicMock() + body = { + "user": {"id": "U1"}, + "team": {"id": "T_WRONG"}, + "trigger_id": "trig", + } + with ( + patch.dict(os.environ, {"PRIMARY_WORKSPACE": "T_PRIMARY"}), + patch("handlers.export_import._is_admin", return_value=True), + ): + handle_backup_restore(body, client, MagicMock(), {}) + + client.views_open.assert_not_called() diff --git a/tests/test_federation_inbound_resolve.py b/tests/test_federation_inbound_resolve.py new file mode 100644 index 0000000..414c185 --- /dev/null +++ b/tests/test_federation_inbound_resolve.py @@ -0,0 +1,76 @@ +"""Tests for federation inbound text resolution (mentions and channels).""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from db import schemas +from federation import api as federation_api + + +class TestResolveMentionsForFederated: + def test_maps_via_user_mapping_target(self): + m = MagicMock() + m.target_user_id = "ULOCAL" + m.source_display_name = "Alice" + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [m] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = 
federation_api._resolve_mentions_for_federated("hi <@UREMOTE>", 10, "Partner WS") + assert out == "hi <@ULOCAL>" + + def test_fallback_stub_mapping_display_name(self): + m = MagicMock() + m.target_user_id = None + m.source_display_name = "Bob" + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [m] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("hi <@UREMOTE>", 10, "Partner WS") + assert out == "hi `[@Bob (Partner WS)]`" + + def test_fallback_user_directory_display_name(self): + entry = MagicMock() + entry.display_name = "Carol" + entry.real_name = None + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [] + if model == schemas.UserDirectory: + return [entry] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("hey <@UX>", 10, "Remote") + assert out == "hey `[@Carol (Remote)]`" + + def test_prefers_mapping_with_target_user_id(self): + good = MagicMock() + good.target_user_id = "UBEST" + good.source_display_name = "Best" + stale = MagicMock() + stale.target_user_id = None + stale.source_display_name = "Stale" + + def fake_find(model, _filters): + if model == schemas.UserMapping: + return [stale, good] + return [] + + with patch.object(federation_api.DbManager, "find_records", side_effect=fake_find): + out = federation_api._resolve_mentions_for_federated("<@U1>", 10, "R") + assert out == "<@UBEST>" diff --git a/tests/test_federation_reactions.py b/tests/test_federation_reactions.py new file mode 100644 index 0000000..ea6bfb4 --- /dev/null +++ b/tests/test_federation_reactions.py @@ -0,0 +1,218 @@ +"""Tests for federated reaction payload and fallback behavior.""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from slack_sdk.errors import SlackApiError + +from 
federation import api as federation_api +from federation import core as federation_core + + +class TestFederationReactionPayload: + def test_build_reaction_payload_includes_user_fields(self): + payload = federation_core.build_reaction_payload( + post_id="post-1", + channel_id="C123", + reaction="custom_emoji", + action="add", + user_name="Alice", + user_avatar_url="https://avatar.example/alice.png", + workspace_name="Workspace A", + timestamp="100.000001", + ) + + assert payload["post_id"] == "post-1" + assert payload["channel_id"] == "C123" + assert payload["reaction"] == "custom_emoji" + assert payload["action"] == "add" + assert payload["user_name"] == "Alice" + assert payload["user_avatar_url"] == "https://avatar.example/alice.png" + assert payload["workspace_name"] == "Workspace A" + assert payload["timestamp"] == "100.000001" + + +class TestFederationReactionFallback: + def test_invalid_name_reaction_falls_back_to_thread_text(self): + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "add", + "user_name": "Alice", + "user_avatar_url": "https://avatar.example/alice.png", + "workspace_name": "Workspace A", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + 
patch.object(federation_api.helpers, "post_message", return_value={"ts": "200.000001"}) as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["ok"] is True + assert resp["applied"] == 1 + ws_client.reactions_add.assert_called_once_with(channel="C123", timestamp="123.456", name="missing_custom") + post_message_mock.assert_called_once_with( + bot_token="xoxb-test", + channel_id="C123", + msg_text="reacted with :missing_custom:", + user_name="Alice", + user_profile_url="https://avatar.example/alice.png", + workspace_name="Workspace A", + thread_ts="123.456", + ) + + def test_non_invalid_name_error_does_not_fallback(self): + """Other Slack errors (rate limit, network, etc.) should NOT trigger the text fallback.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "thumbsup", + "action": "add", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "too_many_reactions" + slack_exc = SlackApiError(message="too many reactions", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 0 + post_message_mock.assert_not_called() + + def 
test_successful_reaction_add_no_fallback(self): + """When reactions_add succeeds, no text fallback should be posted.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "thumbsup", + "action": "add", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + ws_client = MagicMock() + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 1 + ws_client.reactions_add.assert_called_once() + post_message_mock.assert_not_called() + + def test_reaction_remove_invalid_name_no_fallback(self): + """Removing a non-existent emoji should not post a text fallback.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "remove", + "user_name": "Alice", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_remove.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + 
patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message") as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 0 + post_message_mock.assert_not_called() + + def test_missing_user_fields_use_defaults(self): + """When user_name/workspace_name are absent from payload, defaults are used.""" + body = { + "post_id": "post-1", + "channel_id": "C123", + "reaction": "missing_custom", + "action": "add", + } + fed_ws = SimpleNamespace(instance_id="remote-instance") + sync_channel = SimpleNamespace(id=101, channel_id="C123") + workspace = SimpleNamespace(bot_token="enc-token") + post_meta = SimpleNamespace(ts=123.456) + + slack_response = MagicMock() + slack_response.get.return_value = "invalid_name" + slack_exc = SlackApiError(message="emoji not found", response=slack_response) + + ws_client = MagicMock() + ws_client.reactions_add.side_effect = slack_exc + + with ( + patch.object(federation_api, "_resolve_channel_for_federated", return_value=(sync_channel, workspace)), + patch.object(federation_api, "_find_post_records", return_value=[post_meta]), + patch.object(federation_api.helpers, "decrypt_bot_token", return_value="xoxb-test"), + patch.object(federation_api, "WebClient", return_value=ws_client), + patch.object(federation_api.helpers, "post_message", return_value={"ts": "200.000001"}) as post_message_mock, + ): + status, resp = federation_api.handle_message_react(body, fed_ws) + + assert status == 200 + assert resp["applied"] == 1 + post_message_mock.assert_called_once_with( + bot_token="xoxb-test", + channel_id="C123", + msg_text="reacted with :missing_custom:", + user_name="Remote User", + user_profile_url=None, + workspace_name="Remote", + 
thread_ts="123.456", + ) diff --git a/tests/test_file_upload_attribution.py b/tests/test_file_upload_attribution.py new file mode 100644 index 0000000..aca9d27 --- /dev/null +++ b/tests/test_file_upload_attribution.py @@ -0,0 +1,69 @@ +"""Tests for threaded file upload ``initial_comment`` (mentions + permalink).""" + +from unittest.mock import MagicMock, patch + +from slack_sdk.web import WebClient + +from handlers.messages import _shared_by_file_initial_comment + + +class TestSharedByFileInitialComment: + def test_file_only_uses_mention_when_mapped(self): + client = MagicMock(spec=WebClient) + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value="UMAPPED"): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts=None, + ) + assert text == "Shared by <@UMAPPED>" + client.chat_getPermalink.assert_not_called() + + def test_file_only_falls_back_to_display_name(self): + client = MagicMock(spec=WebClient) + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts=None, + ) + assert text == "Shared by Nacho" + + def test_with_text_message_includes_permalink_link(self): + client = MagicMock(spec=WebClient) + client.chat_getPermalink.return_value = {"permalink": "https://example.slack.com/archives/C1/p123"} + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value="U99"): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Nacho", + target_client=client, + channel_id="C1", + text_message_ts="1234.567890", + ) + assert text == "Shared by <@U99> in " + 
client.chat_getPermalink.assert_called_once_with(channel="C1", message_ts="1234.567890") + + def test_permalink_failure_falls_back_to_shared_by_only(self): + client = MagicMock(spec=WebClient) + client.chat_getPermalink.side_effect = RuntimeError("api error") + with patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None): + text = _shared_by_file_initial_comment( + user_id="U_SRC", + source_workspace_id=1, + target_workspace_id=2, + name_for_target="Pat", + target_client=client, + channel_id="C1", + text_message_ts="1.0", + ) + assert text == "Shared by Pat" diff --git a/tests/test_groups_handlers.py b/tests/test_groups_handlers.py new file mode 100644 index 0000000..e02e289 --- /dev/null +++ b/tests/test_groups_handlers.py @@ -0,0 +1,48 @@ +"""Focused unit tests for group handler edge branches.""" + +import os +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.groups import handle_join_group_submit # noqa: E402 + + +class TestJoinGroupSubmit: + def test_invalid_group_code_log_is_sanitized(self): + client = MagicMock() + logger = MagicMock() + workspace = SimpleNamespace(id=42) + + body = { + "user": {"id": "U1"}, + "view": {"state": {"values": {}}}, + } + + with ( + patch("handlers.groups._get_authorized_workspace", return_value=("U1", workspace)), + patch("handlers.groups.forms.ENTER_GROUP_CODE_FORM.get_selected_values", return_value={}), + patch("handlers.groups.helpers._cache_get", return_value=0), + patch("handlers.groups.helpers._cache_set"), + patch("handlers.groups.DbManager.find_records", return_value=[]), + patch("handlers.groups.builders.refresh_home_tab_for_workspace"), + patch("handlers.groups._logger.warning") as warn_log, + 
): + handle_join_group_submit(body, client, logger, context={}) + + matched = [ + call + for call in warn_log.call_args_list + if call.args and call.args[0] == "group_code_invalid" + ] + assert matched, "Expected group_code_invalid warning" + extra = matched[0].kwargs["extra"] + assert "code" not in extra + assert extra["workspace_id"] == workspace.id + assert extra["attempt"] == 1 + assert "code_length" in extra diff --git a/tests/test_handlers.py b/tests/test_handlers.py new file mode 100644 index 0000000..5fa5a3f --- /dev/null +++ b/tests/test_handlers.py @@ -0,0 +1,347 @@ +"""Unit tests for handler parsing and dispatch helpers.""" + +import os +from unittest.mock import MagicMock, patch + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers import ( + EventContext, + _is_own_bot_message, + _parse_event_fields, + _sanitize_text, +) +from handlers.groups import _generate_invite_code + +# ----------------------------------------------------------------------- +# _parse_event_fields +# ----------------------------------------------------------------------- + + +class TestParseEventFields: + def _make_client(self): + client = MagicMock() + client.users_info.return_value = { + "user": { + "id": "U123", + "profile": {"display_name": "TestUser", "real_name": "Test User"}, + } + } + return client + + def test_basic_message(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "channel": "C001", + "user": "U001", + "text": "Hello world", + "ts": "1234567890.000001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["team_id"] == "T001" + assert ctx["channel_id"] == "C001" + assert ctx["user_id"] == "U001" + assert ctx["msg_text"] == "Hello world" + assert ctx["event_subtype"] is None + + def 
test_empty_text_defaults_to_space(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "channel": "C001", + "user": "U001", + "ts": "1234567890.000001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["msg_text"] == " " + + def test_message_changed_subtype(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "message_changed", + "channel": "C001", + "message": { + "user": "U001", + "text": "Edited text", + "ts": "1234567890.000001", + }, + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["event_subtype"] == "message_changed" + assert ctx["msg_text"] == "Edited text" + assert ctx["user_id"] == "U001" + + def test_message_deleted_subtype(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "message_deleted", + "channel": "C001", + "previous_message": { + "ts": "1234567890.000001", + }, + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["event_subtype"] == "message_deleted" + assert ctx["ts"] == "1234567890.000001" + + +# ----------------------------------------------------------------------- +# EventContext TypedDict +# ----------------------------------------------------------------------- + + +class TestEventContextType: + def test_event_context_is_dict(self): + ctx = EventContext( + team_id="T1", + channel_id="C1", + user_id="U1", + msg_text="hi", + mentioned_users=[], + thread_ts=None, + ts="123.456", + event_subtype=None, + ) + assert isinstance(ctx, dict) + assert ctx["team_id"] == "T1" + + +# ----------------------------------------------------------------------- +# _sanitize_text +# ----------------------------------------------------------------------- + + +class TestSanitizeText: + def test_strips_whitespace(self): + assert _sanitize_text(" hello ") == "hello" + + def test_truncates_long_text(self): + result = _sanitize_text("a" * 200, max_length=100) + assert len(result) == 100 + + def 
test_none_passthrough(self): + assert _sanitize_text(None) is None + + def test_empty_string_passthrough(self): + assert _sanitize_text("") == "" + + def test_custom_max_length(self): + result = _sanitize_text("abcdefgh", max_length=5) + assert result == "abcde" + + +# ----------------------------------------------------------------------- +# _is_own_bot_message +# ----------------------------------------------------------------------- + + +class TestIsOwnBotMessage: + def _make_client_with_bot_id(self, bot_id: str = "B_SYNCBOT"): + client = MagicMock() + client.auth_test.return_value = {"bot_id": bot_id} + return client + + def test_own_bot_message_detected(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_SYNCBOT", "text": "synced"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is True + + def test_other_bot_message_not_flagged(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_OTHER", "text": "hello"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is False + + def test_regular_user_message_not_flagged(self): + body = {"event": {"type": "message", "user": "U001", "text": "hello"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is False + + def test_own_bot_in_message_changed(self): + body = { + "event": { + "type": "message", + "subtype": "message_changed", + "channel": "C001", + "message": {"bot_id": "B_SYNCBOT", "subtype": "bot_message", "text": "edited"}, + }, + } + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is True + + def test_other_bot_in_message_changed(self): + body = { + "event": { + "type": "message", + 
"subtype": "message_changed", + "channel": "C001", + "message": {"bot_id": "B_OTHER", "subtype": "bot_message", "text": "edited"}, + }, + } + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {"bot_id": "B_SYNCBOT"} + assert _is_own_bot_message(body, client, context) is False + + def test_fallback_to_auth_test_when_context_empty(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B_SYNCBOT", "text": "hi"}} + client = self._make_client_with_bot_id("B_SYNCBOT") + context = {} + assert _is_own_bot_message(body, client, context) is True + + +class TestParseEventFieldsBotMessage: + def _make_client(self): + client = MagicMock() + client.users_info.return_value = { + "user": {"id": "U123", "profile": {"display_name": "TestUser", "real_name": "Test User"}} + } + return client + + def test_bot_message_has_no_user_id(self): + body = { + "team_id": "T001", + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B_OTHER", + "username": "WeatherBot", + "text": "Today's forecast", + "ts": "1234567890.000001", + "channel": "C001", + }, + } + ctx = _parse_event_fields(body, self._make_client()) + assert ctx["user_id"] is None + assert ctx["event_subtype"] == "bot_message" + assert ctx["msg_text"] == "Today's forecast" + + +# ----------------------------------------------------------------------- +# _generate_invite_code +# ----------------------------------------------------------------------- + + +class TestGenerateInviteCode: + def test_code_format(self): + code = _generate_invite_code() + assert len(code) == 8 # 3 + dash + 4 + assert code[3] == "-" + assert code[:3].isalnum() + assert code[4:].isalnum() + + def test_code_is_uppercase(self): + code = _generate_invite_code() + assert code == code.upper() + + def test_codes_are_unique(self): + codes = {_generate_invite_code() for _ in range(50)} + assert len(codes) > 45 + + def test_custom_length(self): + code = _generate_invite_code(length=8) + assert len(code) == 9 
# 3 + dash + 5 + assert code[3] == "-" + + +# ----------------------------------------------------------------------- +# Invite code normalisation (same logic as group invite code) +# ----------------------------------------------------------------------- + + +class TestInviteCodeValidation: + def test_code_normalisation_adds_dash(self): + raw = "a7xk9m" + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + def test_code_already_formatted(self): + raw = "A7X-K9M" + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + def test_code_with_whitespace(self): + raw = " a7x-k9m " + normalized = raw.strip().upper() + if "-" not in normalized and len(normalized) >= 6: + normalized = f"{normalized[:3]}-{normalized[3:]}" + assert normalized == "A7X-K9M" + + +# ----------------------------------------------------------------------- +# get_request_type — group prefix matching +# ----------------------------------------------------------------------- + + +class TestRequestTypeGroupPrefix: + def test_leave_group_prefix_resolved(self): + from helpers import get_request_type + from slack import actions + + body = { + "type": "block_actions", + "actions": [{"action_id": f"{actions.CONFIG_LEAVE_GROUP}_42"}], + } + req_type, req_id = get_request_type(body) + assert req_type == "block_actions" + assert req_id == actions.CONFIG_LEAVE_GROUP + + +# ----------------------------------------------------------------------- +# handle_new_sync_submission (unit-level: verifies the handler wiring) +# ----------------------------------------------------------------------- + + +class TestNewSyncSubmission: + """Verify that handle_new_sync_submission uses conversations.info to get the channel name.""" + + def test_rejects_unauthorized_user(self): + from 
handlers import handle_new_sync_submission + + client = MagicMock() + client.users_info.return_value = {"user": {"is_admin": False, "is_owner": False}} + body = {"view": {"team_id": "T001"}, "user": {"id": "U001"}} + logger = MagicMock() + + with patch("handlers.sync.helpers.is_user_authorized", return_value=False): + handle_new_sync_submission(body, client, logger, {}) + + client.conversations_info.assert_not_called() + client.conversations_join.assert_not_called() + + def test_rejects_missing_channel_id(self): + from handlers import handle_new_sync_submission + + client = MagicMock() + body = {"view": {"team_id": "T001"}, "user": {"id": "U001"}} + logger = MagicMock() + + with ( + patch("handlers.sync.helpers.is_user_authorized", return_value=True), + patch("handlers.sync.forms.NEW_SYNC_FORM") as mock_form, + ): + mock_form.get_selected_values.return_value = {} + handle_new_sync_submission(body, client, logger, {}) + + client.conversations_info.assert_not_called() diff --git a/tests/test_helpers.py b/tests/test_helpers.py new file mode 100644 index 0000000..b81b937 --- /dev/null +++ b/tests/test_helpers.py @@ -0,0 +1,368 @@ +"""Unit tests for helper utilities under ``syncbot/helpers``.""" + +import os +import time +from unittest.mock import MagicMock, patch + +import pytest + +# Ensure minimal env vars are set before importing app code +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +# Placeholder only; never a real token (avoids secret scanners) +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +import helpers + +# ----------------------------------------------------------------------- +# safe_get +# ----------------------------------------------------------------------- + + +class TestSafeGet: + def test_simple_dict(self): + assert helpers.safe_get({"a": 1}, "a") == 1 + + def test_nested_dict(self): 
+ data = {"a": {"b": {"c": 42}}} + assert helpers.safe_get(data, "a", "b", "c") == 42 + + def test_missing_key_returns_none(self): + assert helpers.safe_get({"a": 1}, "b") is None + + def test_nested_missing_key_returns_none(self): + assert helpers.safe_get({"a": {"b": 1}}, "a", "c") is None + + def test_none_data_returns_none(self): + assert helpers.safe_get(None) is None + + def test_empty_dict_returns_none(self): + assert helpers.safe_get({}, "a") is None + + def test_list_index_access(self): + data = {"items": [{"name": "first"}, {"name": "second"}]} + assert helpers.safe_get(data, "items", 0, "name") == "first" + assert helpers.safe_get(data, "items", 1, "name") == "second" + + def test_list_index_out_of_bounds(self): + data = {"items": [1]} + assert helpers.safe_get(data, "items", 5) is None + + def test_deeply_nested(self): + data = {"a": {"b": {"c": {"d": {"e": "deep"}}}}} + assert helpers.safe_get(data, "a", "b", "c", "d", "e") == "deep" + + +# ----------------------------------------------------------------------- +# Encryption helpers +# ----------------------------------------------------------------------- + + +class TestEncryption: + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "my-secret-key"}) + def test_encrypt_decrypt_roundtrip(self): + # Use a non-secret placeholder; encryption accepts any string + token = "xoxb-0-0" + encrypted = helpers.encrypt_bot_token(token) + assert encrypted != token + decrypted = helpers.decrypt_bot_token(encrypted) + assert decrypted == token + + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "my-secret-key"}) + def test_decrypt_invalid_token_raises(self): + with pytest.raises(ValueError, match="decryption failed"): + helpers.decrypt_bot_token("not-a-valid-encrypted-token") + + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "123"}) + def test_encryption_disabled_with_default_key(self): + token = "xoxb-0-0" + assert helpers.encrypt_bot_token(token) == token + assert helpers.decrypt_bot_token(token) == token + + 
@patch.dict(os.environ, {}, clear=False) + def test_encryption_disabled_when_key_missing(self): + os.environ.pop("TOKEN_ENCRYPTION_KEY", None) + token = "xoxb-0-0" + assert helpers.encrypt_bot_token(token) == token + assert helpers.decrypt_bot_token(token) == token + + @patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "key-A"}) + def test_wrong_key_raises(self): + token = "xoxb-0-0" + encrypted = helpers.encrypt_bot_token(token) + + with ( + patch.dict(os.environ, {"TOKEN_ENCRYPTION_KEY": "key-B"}), + pytest.raises(ValueError, match="decryption failed"), + ): + helpers.decrypt_bot_token(encrypted) + + +# ----------------------------------------------------------------------- +# In-process cache +# ----------------------------------------------------------------------- + + +class TestCache: + def setup_method(self): + helpers._CACHE.clear() + + def test_cache_set_and_get(self): + helpers._cache_set("k1", "value1") + assert helpers._cache_get("k1") == "value1" + + def test_cache_miss(self): + assert helpers._cache_get("nonexistent") is None + + def test_cache_expiry(self): + helpers._cache_set("k2", "value2", ttl=0) + time.sleep(0.01) + assert helpers._cache_get("k2") is None + + def test_cache_within_ttl(self): + helpers._cache_set("k3", "value3", ttl=60) + assert helpers._cache_get("k3") == "value3" + + +# ----------------------------------------------------------------------- +# get_request_type +# ----------------------------------------------------------------------- + + +class TestGetRequestType: + def test_event_callback(self): + body = {"type": "event_callback", "event": {"type": "message"}} + assert helpers.get_request_type(body) == ("event_callback", "message") + + def test_view_submission(self): + body = {"type": "view_submission", "view": {"callback_id": "my_callback"}} + assert helpers.get_request_type(body) == ("view_submission", "my_callback") + + def test_command(self): + body = {"command": "/config-syncbot"} + assert helpers.get_request_type(body) == 
("command", "/config-syncbot") + + def test_unknown(self): + body = {"type": "something_else"} + assert helpers.get_request_type(body) == ("unknown", "unknown") + + +# ----------------------------------------------------------------------- +# slack_retry decorator +# ----------------------------------------------------------------------- + + +# ----------------------------------------------------------------------- +# get_bot_info_from_event +# ----------------------------------------------------------------------- + + +class TestGetBotInfoFromEvent: + def test_extracts_username_and_icon(self): + body = { + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B123", + "username": "WeatherBot", + "icons": {"image_48": "https://example.com/icon48.png"}, + "text": "hello", + } + } + name, icon = helpers.get_bot_info_from_event(body) + assert name == "WeatherBot" + assert icon == "https://example.com/icon48.png" + + def test_fallback_name_when_no_username(self): + body = {"event": {"type": "message", "subtype": "bot_message", "bot_id": "B123", "text": "hello"}} + name, icon = helpers.get_bot_info_from_event(body) + assert name == "Bot" + assert icon is None + + def test_icon_fallback_order(self): + body = { + "event": { + "type": "message", + "subtype": "bot_message", + "bot_id": "B123", + "username": "MyBot", + "icons": {"image_36": "https://example.com/icon36.png", "image_72": "https://example.com/icon72.png"}, + "text": "hello", + } + } + name, icon = helpers.get_bot_info_from_event(body) + assert icon == "https://example.com/icon36.png" + + +# ----------------------------------------------------------------------- +# slack_retry decorator +# ----------------------------------------------------------------------- + + +class TestSlackRetry: + def test_success_on_first_try(self): + @helpers.slack_retry + def fn(): + return "ok" + + assert fn() == "ok" + + def test_retries_on_429(self): + from slack_sdk.errors import SlackApiError + + call_count = 0 
+ + mock_response = MagicMock() + mock_response.status_code = 429 + mock_response.headers = {"Retry-After": "0"} + + @helpers.slack_retry + def fn(): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise SlackApiError("rate_limited", response=mock_response) + return "ok" + + assert fn() == "ok" + assert call_count == 3 + + def test_non_retryable_error_raises_immediately(self): + from slack_sdk.errors import SlackApiError + + mock_response = MagicMock() + mock_response.status_code = 404 + + @helpers.slack_retry + def fn(): + raise SlackApiError("not_found", response=mock_response) + + with pytest.raises(SlackApiError): + fn() + + +# ----------------------------------------------------------------------- +# resolve_channel_references +# ----------------------------------------------------------------------- + + +class TestResolveChannelReferences: + """Tests for helpers.resolve_channel_references (archive URL generation).""" + + def setup_method(self): + helpers._CACHE.clear() + + def _make_workspace(self, team_id="T123", name="Acme"): + ws = MagicMock() + ws.team_id = team_id + ws.workspace_name = name + return ws + + def _make_client(self, channel_name="general", domain="acme"): + client = MagicMock() + client.conversations_info.return_value = {"channel": {"name": channel_name}} + client.team_info.return_value = {"team": {"domain": domain}} + return client + + def test_no_channel_refs_unchanged(self): + result = helpers.resolve_channel_references("hello world", MagicMock()) + assert result == "hello world" + + def test_empty_text(self): + result = helpers.resolve_channel_references("", MagicMock()) + assert result == "" + + def test_none_text(self): + result = helpers.resolve_channel_references(None, MagicMock()) + assert result is None + + def test_archive_url_with_workspace(self): + client = self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = 
helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert "https://acme.slack.com/archives/CABC123" in result + assert "#general (Acme)" in result + + def test_archive_url_without_workspace(self): + client = self._make_client(channel_name="general", domain="acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, None) + assert "#general" in result + + def test_fallback_when_domain_unavailable(self): + client = MagicMock() + client.conversations_info.return_value = {"channel": {"name": "general"}} + client.team_info.side_effect = Exception("api error") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert result == "see `[#general (Acme)]`" + assert "slack.com" not in result + + def test_fallback_when_channel_unresolvable(self): + client = MagicMock() + client.conversations_info.side_effect = Exception("channel_not_found") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123>", client, ws) + assert result == "see #CABC123" + + def test_channel_ref_with_label(self): + client = self._make_client(channel_name="general", domain="acme") + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references("see <#CABC123|general>", client, ws) + assert "https://acme.slack.com/archives/CABC123" in result + + def test_multiple_channel_refs(self): + client = MagicMock() + call_count = 0 + + def conv_info(channel): + nonlocal call_count + call_count += 1 + names = {"CABC111": "alpha", "CABC222": "beta"} + return {"channel": {"name": names.get(channel, channel)}} + + client.conversations_info.side_effect = conv_info + client.team_info.return_value = {"team": {"domain": "acme"}} + ws = self._make_workspace(team_id="T123", name="Acme") + result = helpers.resolve_channel_references( + "see <#CABC111> and <#CABC222>", client, ws + ) + assert "archives/CABC111" 
in result
+        assert "archives/CABC222" in result
+        assert "#alpha" in result
+        assert "#beta" in result
+
+    def test_no_app_redirect_in_output(self):
+        client = self._make_client(channel_name="general", domain="acme")
+        ws = self._make_workspace(team_id="T123", name="Acme")
+        result = helpers.resolve_channel_references("see <#CABC123>", client, ws)
+        assert "app_redirect" not in result
+
+    @patch("helpers.user_matching.find_synced_channel_in_target")
+    def test_native_channel_when_synced_to_target(self, mock_find):
+        mock_find.return_value = "C_LOCAL_TARGET"
+        client = self._make_client(channel_name="general", domain="acme")
+        ws = self._make_workspace(team_id="T123", name="Acme")
+        result = helpers.resolve_channel_references(
+            "see <#CSOURCE123>", client, ws, target_workspace_id=42
+        )
+        assert result == "see <#C_LOCAL_TARGET>"
+        mock_find.assert_called_with("CSOURCE123", 42)
+        assert "slack.com" not in result
+
+    @patch("helpers.user_matching.find_synced_channel_in_target")
+    def test_archive_mrkdwn_rewritten_to_native_when_synced(self, mock_find):
+        mock_find.return_value = "C_LOCAL"
+        client = MagicMock()
+        text = "see <https://acme.slack.com/archives/CSOURCE123|#general (Acme)>"
+        result = helpers.resolve_channel_references(text, client, None, target_workspace_id=1)
+        assert result == "see <#C_LOCAL>"
+
+    @patch("helpers.user_matching.find_synced_channel_in_target")
+    def test_archive_mrkdwn_unchanged_when_not_synced(self, mock_find):
+        mock_find.return_value = None
+        client = MagicMock()
+        text = "see <https://acme.slack.com/archives/CSOURCE123|#general (Acme)>"
+        result = helpers.resolve_channel_references(text, client, None, target_workspace_id=1)
+        assert result == text
diff --git a/tests/test_message_event_dedup.py b/tests/test_message_event_dedup.py
new file mode 100644
index 0000000..94d5894
--- /dev/null
+++ b/tests/test_message_event_dedup.py
@@ -0,0 +1,109 @@
+"""Tests for message event deduplication (file_share vs plain message, Slack retries)."""
+
+import os
+from unittest.mock import MagicMock, patch
+
+os.environ.setdefault("DATABASE_HOST", "localhost")
+os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from handlers.messages import ( # noqa: E402 + _should_skip_slack_event_retry, + respond_to_message_event, +) + + +class TestShouldSkipSlackEventRetry: + def test_skips_when_context_slack_retry_num_ge_1(self): + assert _should_skip_slack_event_retry({}, {"slack_retry_num": 1}) is True + + def test_no_skip_when_slack_retry_num_zero(self): + assert _should_skip_slack_event_retry({}, {"slack_retry_num": 0}) is False + + def test_skips_when_body_retry_attempt_ge_1(self): + assert _should_skip_slack_event_retry({"retry_attempt": 1}, {}) is True + + def test_no_skip_first_delivery(self): + assert _should_skip_slack_event_retry({}, {}) is False + + +class TestRespondToMessageEventDedup: + def _base_body(self): + return { + "team_id": "T001", + "event": { + "type": "message", + "channel": "C001", + "user": "U001", + "text": "Hello", + "ts": "1234567890.000001", + }, + } + + def test_text_only_no_subtype_still_calls_new_post(self): + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context", return_value=([], [], [])), + ): + respond_to_message_event(self._base_body(), client, logger, context) + + mock_new.assert_called_once() + + def test_no_subtype_with_files_skips_without_building_file_context(self): + body = self._base_body() + body["event"]["files"] = [{"id": "F1", "mimetype": "image/jpeg"}] + + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context") as build_fc, + ): + 
respond_to_message_event(body, client, logger, context) + + mock_new.assert_not_called() + build_fc.assert_not_called() + + def test_file_share_subtype_still_calls_new_post(self): + body = self._base_body() + body["event"]["subtype"] = "file_share" + body["event"]["files"] = [{"id": "F1", "mimetype": "image/jpeg"}] + + client = MagicMock() + logger = MagicMock() + context = {} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context", return_value=([], [], [{"path": "/tmp/x", "name": "x.jpg", "mimetype": "image/jpeg"}])), + ): + respond_to_message_event(body, client, logger, context) + + mock_new.assert_called_once() + assert mock_new.call_args is not None + + def test_retry_skips_handler(self): + client = MagicMock() + logger = MagicMock() + context = {"slack_retry_num": 1} + + with ( + patch("handlers.messages._is_own_bot_message", return_value=False), + patch("handlers.messages._handle_new_post") as mock_new, + patch("handlers.messages._build_file_context") as build_fc, + ): + respond_to_message_event(self._base_body(), client, logger, context) + + mock_new.assert_not_called() + build_fc.assert_not_called() diff --git a/tests/test_message_sync.py b/tests/test_message_sync.py new file mode 100644 index 0000000..3f92d11 --- /dev/null +++ b/tests/test_message_sync.py @@ -0,0 +1,107 @@ +"""Tests for sync list / post record deduplication and join-sync duplicate guard.""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from handlers.sync import handle_join_sync_submission +from helpers.slack_api import get_post_records +from helpers.workspace import get_sync_list +from slack import actions + + +class TestGetSyncListDeduplication: + def test_deduplicates_same_workspace_and_channel(self): + ws = SimpleNamespace(id=42, team_id="T1", workspace_name="WS") + sc_source = SimpleNamespace(id=1, sync_id=7, 
channel_id="Csource") + sc_dup_a = SimpleNamespace(id=2, sync_id=7, channel_id="C999") + sc_dup_b = SimpleNamespace(id=3, sync_id=7, channel_id="C999") + + with ( + patch("helpers.workspace._cache_get", return_value=None), + patch("helpers.workspace._cache_set") as cache_set, + patch("helpers.workspace.DbManager.find_records", return_value=[sc_source]), + patch( + "helpers.workspace.DbManager.find_join_records2", + return_value=[(sc_dup_a, ws), (sc_dup_b, ws)], + ), + ): + result = get_sync_list("T1", "Csource") + + assert len(result) == 1 + assert result[0][0] is sc_dup_a + assert result[0][1] is ws # first wins among duplicates + cache_set.assert_called_once() + + +class TestGetPostRecordsDeduplication: + def test_deduplicates_same_workspace_and_channel(self): + pm = SimpleNamespace(id=1, post_id="p1", ts=123.456789) + ws = SimpleNamespace(id=42) + sc_a = SimpleNamespace(id=10, channel_id="C777") + sc_b = SimpleNamespace(id=11, channel_id="C777") + + with ( + patch("helpers.slack_api.DbManager.find_records", return_value=[pm]), + patch( + "helpers.slack_api.DbManager.find_join_records3", + return_value=[(pm, sc_a, ws), (pm, sc_b, ws)], + ), + ): + result = get_post_records("123.456789") + + assert len(result) == 1 + assert result[0][1] is sc_a + + def test_dedup_prefers_lower_post_meta_id_for_split_file_alias(self): + """Reactions on file thread replies share post_id; primary text row must win.""" + pm_file = SimpleNamespace(id=99, post_id="p1", ts=888.888) + pm_text = SimpleNamespace(id=10, post_id="p1", ts=111.111) + ws = SimpleNamespace(id=42) + sc = SimpleNamespace(id=10, channel_id="C777") + + with ( + patch("helpers.slack_api.DbManager.find_records", return_value=[pm_file]), + patch( + "helpers.slack_api.DbManager.find_join_records3", + return_value=[(pm_file, sc, ws), (pm_text, sc, ws)], + ), + ): + result = get_post_records("888.888") + + assert len(result) == 1 + assert result[0][0].id == 10 + assert result[0][0].ts == 111.111 + + +class 
TestJoinSyncDuplicateSkip: + def test_duplicate_channel_skips_join_and_create(self): + client = MagicMock() + logger = MagicMock() + context = {} + workspace = SimpleNamespace(id=10, team_id="T1") + sync_record = SimpleNamespace(id=5, title="Other") + + body = { + "user": {"id": "Uadmin"}, + "view": {"team_id": "T1", "state": {"values": {}}}, + } + form_values = { + actions.CONFIG_JOIN_SYNC_SELECT: 5, + actions.CONFIG_JOIN_SYNC_CHANNEL_SELECT: "Cdup", + } + + with ( + patch("handlers.sync.helpers.get_user_id_from_body", return_value="Uadmin"), + patch("handlers.sync.helpers.is_user_authorized", return_value=True), + patch("handlers.sync.forms.JOIN_SYNC_FORM.get_selected_values", return_value=form_values), + patch("handlers.sync.DbManager.get_record", side_effect=[workspace, sync_record]), + patch("handlers.sync.DbManager.find_records", return_value=[object()]), + patch("handlers.sync.DbManager.create_record") as create_record, + patch("handlers.sync.helpers.format_admin_label", return_value=("Admin", "Admin")), + patch("handlers.sync.builders.refresh_home_tab_for_workspace") as refresh_home, + ): + handle_join_sync_submission(body, client, logger, context) + + create_record.assert_not_called() + client.conversations_join.assert_not_called() + refresh_home.assert_called_once() diff --git a/tests/test_oauth.py b/tests/test_oauth.py new file mode 100644 index 0000000..6d9bb83 --- /dev/null +++ b/tests/test_oauth.py @@ -0,0 +1,108 @@ +"""Unit tests for OAuth flow construction.""" + +import os +from unittest.mock import patch + +from slack_manifest_scopes import USER_SCOPES + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from helpers.oauth import get_oauth_flow + + +class TestGetOAuthFlow: + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", True) + 
@patch.dict(os.environ, {}, clear=True) + def test_local_dev_without_oauth_credentials_returns_none(self): + assert get_oauth_flow() is None + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", True) + @patch.dict( + os.environ, + { + "SLACK_CLIENT_ID": "cid", + "SLACK_CLIENT_SECRET": "csecret", + "SLACK_BOT_SCOPES": "chat:write,channels:read", + }, + clear=True, + ) + @patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_local_dev_with_credentials_uses_sql_stores( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + engine = object() + mock_get_engine.return_value = engine + + flow = get_oauth_flow() + + assert flow is not None + mock_get_engine.assert_called_once_with() + mock_installation_store_cls.assert_called_once_with(client_id="cid", engine=engine) + mock_state_store_cls.assert_called_once_with(expiration_seconds=600, engine=engine) + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", False) + @patch.dict( + os.environ, + { + "SLACK_CLIENT_ID": "prod-cid", + "SLACK_CLIENT_SECRET": "prod-secret", + "SLACK_BOT_SCOPES": "chat:write,groups:read", + }, + clear=True, + ) + @patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_production_uses_sql_stores_without_s3( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + engine = object() + mock_get_engine.return_value = engine + + flow = get_oauth_flow() + + assert flow is not None + assert flow.settings.scopes == ["chat:write", "groups:read"] + assert flow.settings.user_scopes == list(USER_SCOPES) + mock_get_engine.assert_called_once_with() + mock_installation_store_cls.assert_called_once_with(client_id="prod-cid", engine=engine) + mock_state_store_cls.assert_called_once_with(expiration_seconds=600, engine=engine) + + @patch("helpers.oauth.constants.LOCAL_DEVELOPMENT", 
True) + @patch.dict( + os.environ, + { + "SLACK_CLIENT_ID": "cid", + "SLACK_CLIENT_SECRET": "csecret", + "SLACK_BOT_SCOPES": "chat:write", + "SLACK_USER_SCOPES": "chat:write,users:read", + }, + clear=True, + ) + @patch("db.get_engine") + @patch("helpers.oauth.SQLAlchemyOAuthStateStore") + @patch("helpers.oauth.SQLAlchemyInstallationStore") + def test_slack_user_scopes_env_overrides_default( + self, + mock_installation_store_cls, + mock_state_store_cls, + mock_get_engine, + ): + mock_get_engine.return_value = object() + + flow = get_oauth_flow() + + assert flow is not None + assert flow.settings.user_scopes == ["chat:write", "users:read"] diff --git a/tests/test_primary_workspace_gates.py b/tests/test_primary_workspace_gates.py new file mode 100644 index 0000000..2c766f1 --- /dev/null +++ b/tests/test_primary_workspace_gates.py @@ -0,0 +1,56 @@ +"""Tests for PRIMARY_WORKSPACE backup gate and ENABLE_DB_RESET boolean.""" + +import os +from unittest.mock import patch + +import pytest + +os.environ.setdefault("DATABASE_HOST", "localhost") +os.environ.setdefault("DATABASE_USER", "root") +os.environ.setdefault("DATABASE_PASSWORD", "test") +os.environ.setdefault("DATABASE_SCHEMA", "syncbot") +os.environ.setdefault("SLACK_BOT_TOKEN", "xoxb-0-0") + +from helpers.core import ( # noqa: E402 + is_backup_visible_for_workspace, + is_db_reset_visible_for_workspace, +) + + +class TestIsBackupVisibleForWorkspace: + def test_unset_primary_denies_all(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": ""}): + assert is_backup_visible_for_workspace("T111") is False + assert is_backup_visible_for_workspace(None) is False + + def test_matching_team_allowed(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}): + assert is_backup_visible_for_workspace("TABC123") is True + + def test_non_matching_team_denied(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}): + assert is_backup_visible_for_workspace("TOTHER") is False + + +class 
TestIsDbResetVisibleForWorkspace: + def test_unset_primary_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "", "ENABLE_DB_RESET": "true"}): + assert is_db_reset_visible_for_workspace("T111") is False + + def test_primary_match_and_true_enables(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": "true"}): + assert is_db_reset_visible_for_workspace("TABC123") is True + + @pytest.mark.parametrize("truthy", ("true", "1", "yes")) + def test_truthy_strings(self, truthy: str): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": truthy}): + assert is_db_reset_visible_for_workspace("TABC123") is True + + def test_unset_enable_db_reset_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123"}, clear=False): + os.environ.pop("ENABLE_DB_RESET", None) + assert is_db_reset_visible_for_workspace("TABC123") is False + + def test_team_mismatch_denies(self): + with patch.dict(os.environ, {"PRIMARY_WORKSPACE": "TABC123", "ENABLE_DB_RESET": "true"}): + assert is_db_reset_visible_for_workspace("TOTHER") is False diff --git a/tests/test_routing_deferred_ack.py b/tests/test_routing_deferred_ack.py new file mode 100644 index 0000000..c024888 --- /dev/null +++ b/tests/test_routing_deferred_ack.py @@ -0,0 +1,25 @@ +"""Invariant: deferred-ack view callback IDs stay registered in VIEW_ACK_MAPPER / VIEW_MAPPER.""" + +from routing import VIEW_ACK_MAPPER, VIEW_MAPPER +from slack import actions +from slack.deferred_ack_views import DEFERRED_ACK_VIEW_CALLBACK_IDS + + +def test_deferred_ack_matches_view_ack_mapper(): + assert frozenset(VIEW_ACK_MAPPER.keys()) == DEFERRED_ACK_VIEW_CALLBACK_IDS + + +def test_publish_mode_is_ack_only_not_in_work_mapper(): + assert actions.CONFIG_PUBLISH_MODE_SUBMIT in VIEW_ACK_MAPPER + assert actions.CONFIG_PUBLISH_MODE_SUBMIT not in VIEW_MAPPER + + +def test_deferred_work_views_have_work_handlers(): + for callback_id in DEFERRED_ACK_VIEW_CALLBACK_IDS: + if 
callback_id == actions.CONFIG_PUBLISH_MODE_SUBMIT: + continue + assert callback_id in VIEW_MAPPER, f"missing VIEW_MAPPER work entry for {callback_id!r}" + + +def test_deferred_ack_set_is_nonempty(): + assert len(DEFERRED_ACK_VIEW_CALLBACK_IDS) >= 1 diff --git a/tests/test_slack_manifest_scopes.py b/tests/test_slack_manifest_scopes.py new file mode 100644 index 0000000..9153d84 --- /dev/null +++ b/tests/test_slack_manifest_scopes.py @@ -0,0 +1,62 @@ +"""slack-manifest.json stays aligned with syncbot/slack_manifest_scopes.py.""" + +import json +import re +from pathlib import Path + +from slack_manifest_scopes import ( + BOT_SCOPES, + USER_SCOPES, + bot_scopes_comma_separated, + user_scopes_comma_separated, +) + + +def _manifest() -> dict: + root = Path(__file__).resolve().parent.parent + return json.loads(root.joinpath("slack-manifest.json").read_text(encoding="utf-8")) + + +def test_slack_manifest_bot_scopes_match_constants(): + bot = _manifest()["oauth_config"]["scopes"]["bot"] + assert bot == list(BOT_SCOPES) + + +def test_slack_manifest_user_scopes_match_constants(): + user = _manifest()["oauth_config"]["scopes"]["user"] + assert user == list(USER_SCOPES) + + +def test_sam_template_slack_oauth_default_matches_bot_scopes(): + """infra/aws/template.yaml SlackOauthBotScopes Default must match BOT_SCOPES.""" + root = Path(__file__).resolve().parent.parent + text = root.joinpath("infra/aws/template.yaml").read_text(encoding="utf-8") + m = re.search( + r'^\s*SlackOauthBotScopes:\s*\n(?:^\s+.*\n)*?\s*Default:\s*"([^"]+)"', + text, + re.MULTILINE, + ) + assert m, "SlackOauthBotScopes Default not found in template.yaml" + assert m.group(1) == bot_scopes_comma_separated() + + +def test_sam_template_slack_user_oauth_default_matches_user_scopes(): + """infra/aws/template.yaml SlackOauthUserScopes Default must match USER_SCOPES.""" + root = Path(__file__).resolve().parent.parent + text = root.joinpath("infra/aws/template.yaml").read_text(encoding="utf-8") + m = re.search( + 
r'^\s*SlackOauthUserScopes:\s*\n(?:^\s+.*\n)*?\s*Default:\s*"([^"]*)"', + text, + re.MULTILINE, + ) + assert m, "SlackOauthUserScopes Default not found in template.yaml" + assert m.group(1) == user_scopes_comma_separated() + + +def test_bot_scopes_comma_separated_roundtrip(): + assert bot_scopes_comma_separated().split(",") == list(BOT_SCOPES) + + +def test_user_scopes_comma_separated_roundtrip(): + s = user_scopes_comma_separated() + assert [x.strip() for x in s.split(",") if x.strip()] == list(USER_SCOPES) diff --git a/tests/test_split_message_reactions.py b/tests/test_split_message_reactions.py new file mode 100644 index 0000000..44a21ff --- /dev/null +++ b/tests/test_split_message_reactions.py @@ -0,0 +1,118 @@ +"""Tests for PostMeta rows on split text+file sync (reaction resolution).""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +from slack_sdk.web import WebClient + +from handlers.messages import _handle_new_post, _handle_thread_reply + + +class TestSplitMessagePostMeta: + def test_new_post_text_plus_file_stores_file_ts_same_post_id(self): + logger = MagicMock() + client = MagicMock(spec=WebClient) + + sc_source = SimpleNamespace(id=1, channel_id="C_SRC", sync_id=7) + ws_source = SimpleNamespace(id=10, team_id="T1", bot_token="enc", workspace_name="A") + sc_target = SimpleNamespace(id=2, channel_id="C_TGT", sync_id=7) + ws_target = SimpleNamespace(id=20, team_id="T2", bot_token="enc", workspace_name="B") + + body = { + "event": { + "channel": "C_SRC", + "ts": "100.000000", + "team": "T1", + } + } + ctx = { + "team_id": "T1", + "channel_id": "C_SRC", + "msg_text": "hello", + "mentioned_users": [], + "user_id": "U1", + } + direct_files = [{"path": "/tmp/f.jpg", "name": "f.jpg"}] + + created: list = [] + + def capture_post_meta(rows): + created.extend(rows) + + with ( + patch("handlers.messages.helpers.get_sync_list", return_value=[(sc_source, ws_source), (sc_target, ws_target)]), + 
patch("handlers.messages.helpers.get_user_info", return_value=("N", "http://i")), + patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None), + patch("handlers.messages.helpers.get_federated_workspace_for_sync", return_value=None), + patch("handlers.messages.helpers.decrypt_bot_token", return_value="xoxb-test"), + patch("handlers.messages.helpers.apply_mentioned_users", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.resolve_channel_references", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.get_workspace_by_id", return_value=None), + patch( + "handlers.messages.helpers.get_display_name_and_icon_for_synced_message", + return_value=("N", None), + ), + patch("handlers.messages.helpers.post_message", return_value={"ts": "200.000000"}), + patch("handlers.messages.helpers.upload_files_to_slack", return_value=(None, "300.000000")), + patch("handlers.messages.helpers.cleanup_temp_files"), + patch("handlers.messages.DbManager.create_records", side_effect=capture_post_meta), + ): + _handle_new_post(body, client, logger, ctx, [], [], direct_files) + + assert len(created) == 3 + assert {m.sync_channel_id for m in created} == {1, 2} + target_rows = [m for m in created if m.sync_channel_id == 2] + assert len(target_rows) == 2 + assert target_rows[0].post_id == target_rows[1].post_id + assert {target_rows[0].ts, target_rows[1].ts} == {200.0, 300.0} + + def test_thread_reply_text_plus_file_stores_file_ts_same_post_id(self): + logger = MagicMock() + client = MagicMock(spec=WebClient) + + pm_src = SimpleNamespace(id=1, post_id="parent", ts=10.0) + pm_tgt = SimpleNamespace(id=2, post_id="parent", ts=20.0) + sc_source = SimpleNamespace(id=11, channel_id="C_SRC", sync_id=7) + ws_source = SimpleNamespace(id=10, workspace_name="A", bot_token="enc") + sc_target = SimpleNamespace(id=22, channel_id="C_TGT", sync_id=7) + ws_target = SimpleNamespace(id=20, workspace_name="B", bot_token="enc") + + post_records = [(pm_src, 
sc_source, ws_source), (pm_tgt, sc_target, ws_target)] + + body = {"event": {"channel": "C_SRC", "ts": "150.000000"}} + ctx = { + "channel_id": "C_SRC", + "msg_text": "reply", + "mentioned_users": [], + "user_id": "U1", + "thread_ts": "10.000000", + } + direct_files = [{"path": "/tmp/f.jpg", "name": "f.jpg"}] + + created: list = [] + + with ( + patch("handlers.messages.helpers.get_post_records", return_value=post_records), + patch("handlers.messages.helpers.get_user_info", return_value=("N", "http://i")), + patch("handlers.messages.helpers.get_mapped_target_user_id", return_value=None), + patch("handlers.messages.helpers.get_federated_workspace_for_sync", return_value=None), + patch("handlers.messages.helpers.decrypt_bot_token", return_value="xoxb-test"), + patch("handlers.messages.helpers.apply_mentioned_users", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.resolve_channel_references", side_effect=lambda t, *a, **k: t), + patch("handlers.messages.helpers.get_workspace_by_id", return_value=None), + patch( + "handlers.messages.helpers.get_display_name_and_icon_for_synced_message", + return_value=("N", None), + ), + patch("handlers.messages.helpers.post_message", return_value={"ts": "250.000000"}), + patch("handlers.messages.helpers.upload_files_to_slack", return_value=(None, "350.000000")), + patch("handlers.messages.helpers.cleanup_temp_files"), + patch("handlers.messages.DbManager.create_records", side_effect=lambda rows: created.extend(rows)), + ): + _handle_thread_reply(body, client, logger, ctx, [], direct_files) + + assert len(created) == 3 + target_rows = [m for m in created if m.sync_channel_id == 22] + assert len(target_rows) == 2 + assert target_rows[0].post_id == target_rows[1].post_id + assert {target_rows[0].ts, target_rows[1].ts} == {250.0, 350.0}