From abd592d7bcc7f001493767f9ca57aa5cef804364 Mon Sep 17 00:00:00 2001 From: al Date: Tue, 7 Oct 2025 02:27:36 -0400 Subject: [PATCH 1/5] feat: that waw easy: --- .env.hcp | 3 - .env.project | 43 + .env.vault | 51 + .github/workflows/cd/docker.yml | 0 .github/workflows/docker/docker-build.yml | 50 - .github/workflows/iac/deploy.yml | 147 -- .github/workflows/iac/terraform-plan.yml | 123 -- .gitignore | 5 - .kamal/secrets | 27 +- .ruby-version | 1 - Makefile | 132 +- README.md | 394 ++++- assets/styles/animations.css | 40 - assets/styles/theme.css | 67 - bin/config | 329 ++++ bin/deploy | 176 +++ bin/dev | 121 ++ bin/iac | 77 + bin/kamal | 163 +- bin/ssh | 87 ++ bin/tf | 49 - bin/tfc | 265 ++++ py/bin/utils.sh => bin/utils | 47 +- bin/vault | 215 ++- branding/README.md | 323 ++++ branding/assets/README.md | 38 + {assets/icons => branding/assets}/favicon.ico | Bin {assets/icons => branding/assets}/favicon.png | Bin {assets/icons => branding/assets}/icon.png | Bin {assets/icons => branding/assets}/icon.svg | 0 branding/core/tokens.json | 176 +++ branding/index.js | 47 + branding/package.json | 23 + branding/styles/animations.css | 199 +++ branding/styles/utilities.css | 163 ++ branding/styles/variables.css | 67 + branding/tailwind/preset.js | 140 ++ config/deploy/py.yml | 55 +- config/deploy/static.yml | 26 +- config/deploy/ts-web.yml | 33 +- config/example.yml | 116 +- docs/deployment/KAMAL.md | 474 ++++++ docs/development/LOCAL.md | 485 ++++++ docs/setup/WALKTHROUGH.md | 505 ++++++ hcp.yaml | 5 - iac/Makefile | 106 -- iac/envs/common/main.tf | 49 - iac/envs/common/outputs.tf | 57 - iac/envs/container-registry/main.tf | 10 - iac/envs/container-registry/outputs.tf | 12 - iac/envs/container-registry/terraform.tf | 10 - iac/envs/container-registry/variables.tf | 38 - iac/envs/production/main.tf | 22 - iac/envs/production/outputs.tf | 45 - iac/envs/production/terraform.tf | 11 - iac/envs/production/variables.tf | 5 - iac/modules/cloudflare/dns/main.tf | 6 +- iac/modules/cloudflare/dns/variables.tf | 24 +- iac/modules/common/main.tf | 50 + iac/modules/common/outputs.tf | 15 + iac/{envs => modules}/common/variables.tf | 21 +- iac/{envs => modules}/common/versions.tf | 0 .../digitalocean/container-registry/main.tf | 11 - .../container-registry/outputs.tf | 13 - .../container-registry/provider.tf | 8 - .../container-registry/variables.tf | 24 - iac/modules/digitalocean/droplet/variables.tf | 2 +- iac/modules/digitalocean/provider.tf | 8 - iac/modules/digitalocean/ssh_key/main.tf | 3 +- iac/stages/container-registry/.gitignore | 20 + iac/stages/container-registry/main.tf | 42 + iac/stages/container-registry/providers.tf | 11 + iac/stages/container-registry/terraform.tf | 9 + iac/stages/container-registry/variables.tf | 27 + iac/stages/production/.gitignore | 29 + iac/stages/production/main.tf | 23 + iac/stages/production/outputs.tf | 11 + iac/stages/production/providers.tf | 15 + iac/stages/production/terraform.tf | 9 + iac/stages/production/variables.tf | 20 + py/.env.example | 7 +- py/.gitignore | 8 +- py/Dockerfile | 44 +- py/Makefile | 59 +- py/README.md | 239 ++- py/alembic.ini | 2 +- ...y => 20250926_113958_74e9156d0365_init.py} | 6 +- py/bin/app/dev.sh | 57 + py/bin/app/run.sh | 10 + py/bin/check.sh | 123 -- py/bin/{ => ci}/fmt.sh | 11 +- py/bin/{ => ci}/lint.sh | 11 +- py/bin/{ => ci}/test.sh | 15 +- py/bin/{ => ci}/types.sh | 13 +- py/bin/config.sh | 32 - py/bin/{postgres.sh => db.sh} | 161 +- py/bin/dev.sh | 59 - py/bin/docker.sh | 294 ---- py/bin/install.sh | 6 - py/bin/migrate.sh | 42 - 
py/bin/prepare_migration.sh | 37 - py/bin/run.sh | 24 - py/bin/styles.sh | 87 ++ py/bin/tailwind.sh | 12 - py/hcp.yaml | 6 - py/src/config.py | 27 +- py/src/server/__init__.py | 12 +- py/src/server/auth/__init__.py | 2 +- py/static/css/main.css | 1389 +---------------- py/styles/animations.css | 167 +- py/styles/main.css | 108 +- py/tailwind.config.js | 49 +- py/templates/components/footer.html | 73 +- py/templates/layouts/app.html | 121 +- py/templates/layouts/base.html | 326 ++-- py/templates/layouts/home.html | 6 +- py/templates/pages/app/dashboard.html | 6 +- py/templates/pages/app/login.html | 46 +- py/templates/pages/index.html | 8 +- static/Dockerfile | 6 +- ts/Makefile | 23 +- ts/README.md | 249 +++ ts/apps/web/Dockerfile | 6 +- ts/apps/web/public/favicon.ico | Bin 15406 -> 39 bytes ts/apps/web/public/favicon.png | 1 + ts/apps/web/public/icon.png | 1 + ts/apps/web/public/icon.svg | 110 +- ts/bin/styles.sh | 50 + 128 files changed, 6453 insertions(+), 3951 deletions(-) delete mode 100644 .env.hcp create mode 100644 .env.project create mode 100644 .env.vault delete mode 100644 .github/workflows/cd/docker.yml delete mode 100644 .github/workflows/docker/docker-build.yml delete mode 100644 .github/workflows/iac/deploy.yml delete mode 100644 .github/workflows/iac/terraform-plan.yml delete mode 100644 .ruby-version delete mode 100644 assets/styles/animations.css delete mode 100644 assets/styles/theme.css create mode 100755 bin/config create mode 100755 bin/deploy create mode 100755 bin/dev create mode 100755 bin/iac create mode 100755 bin/ssh delete mode 100755 bin/tf create mode 100755 bin/tfc rename py/bin/utils.sh => bin/utils (55%) create mode 100644 branding/README.md create mode 100644 branding/assets/README.md rename {assets/icons => branding/assets}/favicon.ico (100%) rename {assets/icons => branding/assets}/favicon.png (100%) rename {assets/icons => branding/assets}/icon.png (100%) rename {assets/icons => branding/assets}/icon.svg (100%) create mode 100644 branding/core/tokens.json create mode 100644 branding/index.js create mode 100644 branding/package.json create mode 100644 branding/styles/animations.css create mode 100644 branding/styles/utilities.css create mode 100644 branding/styles/variables.css create mode 100644 branding/tailwind/preset.js create mode 100644 docs/deployment/KAMAL.md create mode 100644 docs/development/LOCAL.md create mode 100644 docs/setup/WALKTHROUGH.md delete mode 100644 hcp.yaml delete mode 100644 iac/Makefile delete mode 100644 iac/envs/common/main.tf delete mode 100644 iac/envs/common/outputs.tf delete mode 100644 iac/envs/container-registry/main.tf delete mode 100644 iac/envs/container-registry/outputs.tf delete mode 100644 iac/envs/container-registry/terraform.tf delete mode 100644 iac/envs/container-registry/variables.tf delete mode 100644 iac/envs/production/main.tf delete mode 100644 iac/envs/production/outputs.tf delete mode 100644 iac/envs/production/terraform.tf delete mode 100644 iac/envs/production/variables.tf create mode 100644 iac/modules/common/main.tf create mode 100644 iac/modules/common/outputs.tf rename iac/{envs => modules}/common/variables.tf (64%) rename iac/{envs => modules}/common/versions.tf (100%) delete mode 100644 iac/modules/digitalocean/container-registry/main.tf delete mode 100644 iac/modules/digitalocean/container-registry/outputs.tf delete mode 100644 iac/modules/digitalocean/container-registry/provider.tf delete mode 100644 iac/modules/digitalocean/container-registry/variables.tf delete mode 100644 
iac/modules/digitalocean/provider.tf create mode 100644 iac/stages/container-registry/.gitignore create mode 100644 iac/stages/container-registry/main.tf create mode 100644 iac/stages/container-registry/providers.tf create mode 100644 iac/stages/container-registry/terraform.tf create mode 100644 iac/stages/container-registry/variables.tf create mode 100644 iac/stages/production/.gitignore create mode 100644 iac/stages/production/main.tf create mode 100644 iac/stages/production/outputs.tf create mode 100644 iac/stages/production/providers.tf create mode 100644 iac/stages/production/terraform.tf create mode 100644 iac/stages/production/variables.tf rename py/alembic/versions/{64d96d2d5ba4_init.py => 20250926_113958_74e9156d0365_init.py} (90%) create mode 100755 py/bin/app/dev.sh create mode 100644 py/bin/app/run.sh delete mode 100755 py/bin/check.sh rename py/bin/{ => ci}/fmt.sh (63%) rename py/bin/{ => ci}/lint.sh (63%) rename py/bin/{ => ci}/test.sh (87%) rename py/bin/{ => ci}/types.sh (70%) delete mode 100755 py/bin/config.sh rename py/bin/{postgres.sh => db.sh} (60%) delete mode 100755 py/bin/dev.sh delete mode 100755 py/bin/docker.sh delete mode 100755 py/bin/install.sh delete mode 100755 py/bin/migrate.sh delete mode 100755 py/bin/prepare_migration.sh delete mode 100755 py/bin/run.sh create mode 100755 py/bin/styles.sh delete mode 100755 py/bin/tailwind.sh delete mode 100644 py/hcp.yaml create mode 100644 ts/README.md mode change 100644 => 120000 ts/apps/web/public/favicon.ico create mode 120000 ts/apps/web/public/favicon.png create mode 120000 ts/apps/web/public/icon.png mode change 100644 => 120000 ts/apps/web/public/icon.svg create mode 100755 ts/bin/styles.sh diff --git a/.env.hcp b/.env.hcp deleted file mode 100644 index e273cf4..0000000 --- a/.env.hcp +++ /dev/null @@ -1,3 +0,0 @@ -# HCP Vault configuration -# TODO: Replace with your actual project ID -VAULT_PROJECT_ID=a9087abb-c3b8-405c-b77f-f45e9bed9d09 \ No newline at end of file diff --git a/.env.project b/.env.project new file mode 100644 index 0000000..ef066c0 --- /dev/null +++ b/.env.project @@ -0,0 +1,43 @@ +# Project configuration +# TODO (amiller68): i should be kinda to myself and +# write a bin/config that validates this according to the +# notes i have here. + +# the name of your project -- this will control +# the naming of resources and apps, as well +# as parameterize where we read env vars from +# in 1password +# NOTE (amiller68): do not try to change this after you initialize +# workspaces, dev envs, and infra! +# Idt it would strictly *break* anything, but might result in some +# PITA bugs +PROJECT_NAME=krondor-generic + +# the name of the vault we will load cloud +# provider credentials from +# NOTE (amiller68): this should exist in your op account, managed by an admin +# this basically describes the 'keys to the infra kingdom' +# you should come up with a more a better way to secure these providers +# on a dedicated account once your project is actually up and running. +CLOUD_VAULT=cloud-providers + +# List of services (must match config/deploy/*.yml files) +# Format: service_name:subdomain (empty subdomain = root domain) +# Where subdomain describes where the service will be deployed +# relative to DNS_ROOT_ZONE +# e.g. these will deploy to ,app. 
for production +# SERVICES="static:,ts-web:ts,py:app" +SERVICES="static:generic,py:py.generic,ts-web:ts.generic" + +# Container registry configuration +# Whether to use private repositories on Docker Hub +USE_PRIVATE_REPOS=false + +# DNS configuration +# Root domain for your project. +# We use cloudflare for our DNS service, +# So this should point to a root zone within your +# cloudflare account that you configure in your +# cloud provider vault. See .env.vault for more details +# TODO (amiller68): replace with better branded domain +DNS_ROOT_ZONE=krondor.org diff --git a/.env.vault b/.env.vault new file mode 100644 index 0000000..ab1f12b --- /dev/null +++ b/.env.vault @@ -0,0 +1,51 @@ +# TODO (amiller68): docs describing how to get stuff from here injectable in kamal +# it is potentially a little annoying + +# Slug to describe what stage to load your environment from in op +# These variables should be gauranteed by: +# - project config +# - and our scripts that source this file +VAULT_SLUG=${PROJECT_NAME}-${STAGE} + +# Cloud provider credentials +# We assume that the `CLOUD_VAULT` var is exported +# before you source this +# Everything in here in theory should be portable to other +# projects you might manage in the same cloud provider. + +# Terraform Cloud API token for workspace management. +# This account will be set up with a new org for the project +# Set up with new workspaces for all of our stages. +TF_TOKEN=op://${CLOUD_VAULT}/TERRAFORM_CLOUD_API_TOKEN/credential + +# Docker Hub Access Token for container registry +# This account will host +DOCKER_HUB_USERNAME=op://${CLOUD_VAULT}/DOCKER_HUB_LOGIN/username +# NOTE (amiller68): annoyingly this has to be your full ass docker hub password. +# I am big assuming that if the project you're building off of this is any way successful +# you will do something more secure, or have a more production friendly container registry +DOCKER_HUB_PASSWORD=op://${CLOUD_VAULT}/DOCKER_HUB_LOGIN/credential +DOCKER_HUB_TOKEN=op://${CLOUD_VAULT}/DOCKER_HUB_TOKEN/credential + +# For managing DNS zones across your account in Cloudflare +# This account must: +# - own the root dns zone name you set up in .env.project +# - have permissions to manage its DNS config +CLOUDFLARE_API_TOKEN=op://${CLOUD_VAULT}/CLOUDFLARE_DNS_API_TOKEN/credential + +# NOTE (amiller68): i mapped the var name since the TF DigitalOcean provider expects DIGITALOCEAN_TOKEN +# for deploying droplets on Digital Ocean +# Api token with access to a Digital Ocean Account +# This account will end up owning the created resources. +# It should have billing set up and have enough limits to deploy a few droplets +DIGITALOCEAN_TOKEN=op://${CLOUD_VAULT}/DO_API_TOKEN/credential + +# App Configs +# Everything below is stuff you need to power your applications. 
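# For illustration only -- the item name below is hypothetical, not something
# this repo ships. A per-service secret is referenced the same way as the
# app secrets that follow, and is resolved from 1Password by the scripts
# that source this file:
#   MY_SERVICE_API_KEY=op://${VAULT_SLUG}/MY_SERVICE_API_KEY/credential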
+# These should be configured by stage + +# /py + +# Google OAuth Client +GOOGLE_O_AUTH_CLIENT_ID=op://${VAULT_SLUG}/GOOGLE_O_AUTH_CLIENT/username +GOOGLE_O_AUTH_CLIENT_SECRET=op://${VAULT_SLUG}/GOOGLE_O_AUTH_CLIENT/credential diff --git a/.github/workflows/cd/docker.yml b/.github/workflows/cd/docker.yml deleted file mode 100644 index e69de29..0000000 diff --git a/.github/workflows/docker/docker-build.yml b/.github/workflows/docker/docker-build.yml deleted file mode 100644 index d916e37..0000000 --- a/.github/workflows/docker/docker-build.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: 🐳 Docker Build - -on: - pull_request: - paths: - - "apps/**" - - "packages/**" - - ".github/workflows/docker-build.yml" - -jobs: - detect-changes: - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.detect.outputs.matrix }} - steps: - - name: Checkout code - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Detect service changes - id: detect - uses: ./.github/actions/detect-service-changes - with: - pr_mode: "true" - - build: - needs: detect-changes - if: ${{ fromJSON(needs.detect-changes.outputs.matrix).service[0] != null }} - runs-on: ubuntu-latest - strategy: - matrix: ${{ fromJSON(needs.detect-changes.outputs.matrix) }} - fail-fast: false - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build Docker image - id: build_image - run: | - chmod +x bin/docker - ./bin/docker build ${{ matrix.service }} - - - name: Verify build - run: | - echo "Docker image built successfully!" - docker images | grep ${{ matrix.service }} diff --git a/.github/workflows/iac/deploy.yml b/.github/workflows/iac/deploy.yml deleted file mode 100644 index 387c549..0000000 --- a/.github/workflows/iac/deploy.yml +++ /dev/null @@ -1,147 +0,0 @@ - name: 🚀 Deploy Infrastructure and Services - - on: - push: - branches: [main, dev] - paths: - - "apps/**" - - "packages/**" - - "iac/**" - - ".github/workflows/deploy.yml" - workflow_dispatch: - inputs: - services: - description: "Comma-separated list of services to build" - required: false - default: "" - type: string - skip-build: - description: "Skip the build step" - required: false - default: false - type: boolean - - permissions: - contents: read - - jobs: - terraform-apply: - name: Terraform Apply - runs-on: ubuntu-latest - defaults: - run: - working-directory: iac/envs - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Setup Terraform - uses: hashicorp/setup-terraform@v2 - with: - terraform_version: 1.5.7 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }} - aws-region: us-east-1 - - - name: Determine Environment - id: env - run: | - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - echo "env_path=production" >> $GITHUB_OUTPUT - else - echo "Unsupported branch: ${{ github.ref }}" - exit 1 - fi - - - name: Terraform Init and Apply ECR - run: | - cd ./ecr - terraform init - terraform apply -auto-approve - - - name: Terraform Init and Apply Environment - run: | - cd ./${{ steps.env.outputs.env_path }} - terraform init - terraform apply -auto-approve - - detect-changes: - needs: terraform-apply - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.detect.outputs.matrix }} - steps: - - name: Checkout code - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Detect service changes - id: detect - 
uses: ./.github/actions/detect-service-changes - with: - pr_mode: "false" - services_input: ${{ github.event.inputs.services }} - - build-and-push: - needs: detect-changes - if: | - fromJSON(needs.detect-changes.outputs.matrix).service[0] != null && - github.event.inputs.skip-build != 'true' - runs-on: ubuntu-latest - strategy: - matrix: ${{ fromJSON(needs.detect-changes.outputs.matrix) }} - fail-fast: false - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }} - aws-region: us-east-1 - - - name: Build and Push to ECR - run: | - chmod +x bin/docker - ./bin/docker build ${{ matrix.service }} -p linux/amd64 - ./bin/docker push ${{ matrix.service }} - - deploy-to-ecs: - needs: [detect-changes, build-and-push] - runs-on: ubuntu-latest - strategy: - matrix: ${{ fromJSON(needs.detect-changes.outputs.matrix) }} - fail-fast: false - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }} - aws-region: us-east-1 - - - name: Deploy to ECS - run: | - chmod +x bin/ecs - if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then - STAGE="production" - else - echo "Error: Deployment triggered from unsupported branch: ${{ github.ref }}" - exit 1 - fi - - ./bin/ecs deploy $STAGE ${{ matrix.service }} diff --git a/.github/workflows/iac/terraform-plan.yml b/.github/workflows/iac/terraform-plan.yml deleted file mode 100644 index 66aae9f..0000000 --- a/.github/workflows/iac/terraform-plan.yml +++ /dev/null @@ -1,123 +0,0 @@ -name: 🏗️ Terraform Plan - -on: - pull_request: - paths: - - "iac/**" - - ".github/workflows/terraform-*.yml" - -permissions: - contents: read - pull-requests: write - issues: write - -jobs: - terraform-plan: - name: Terraform Plan - runs-on: ubuntu-latest - defaults: - run: - working-directory: iac/envs - - steps: - - name: Checkout - uses: actions/checkout@v3 - - - name: Setup Terraform - uses: hashicorp/setup-terraform@v2 - with: - terraform_version: 1.5.7 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@v2 - with: - aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY }} - aws-region: us-east-1 - - - name: Terraform Format - id: fmt - run: terraform fmt -check -recursive - continue-on-error: true - - - name: Determine Environment - id: env - run: | - if [[ "${{ github.base_ref }}" == "main" ]]; then - echo "env_path=production" >> $GITHUB_OUTPUT - else - echo "Unsupported target branch: ${{ github.base_ref }}" - exit 1 - fi - - - name: Terraform Init ECR - id: init-ecr - run: | - cd ./ecr - terraform init - - # TODO: if you add a new ecr repo, then this step will fail. 
- # Obviously we should be able to t account for this, but for - # the sake of a demo / example this is fine - - name: Terraform Plan ECR - id: plan-ecr - run: | - cd ./ecr - terraform plan -no-color - continue-on-error: true - - - name: Terraform Init Environment - id: init-env - run: | - cd ./production - terraform init - - - name: Terraform Plan Environment - id: plan-env - run: | - cd ./production - terraform plan -no-color - continue-on-error: true - - - name: Update Pull Request - uses: actions/github-script@v6 - env: - PLAN_ECR: "${{ steps.plan-ecr.outputs.stdout }}" - PLAN_ENV: "${{ steps.plan-env.outputs.stdout }}" - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - script: | - let output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`\n`; - - output += ` - ### ECR Changes - #### Terraform Plan 📖\`${{ steps.plan-ecr.outcome }}\` -
<details><summary>Show ECR Plan</summary> - - \`\`\`\n - ${process.env.PLAN_ECR} - \`\`\` - - </details>
\n`; - - output += ` - ### Environment Changes (${{ steps.env.outputs.env_path }}) - #### Terraform Plan 📖\`${{ steps.plan-env.outcome }}\` -
<details><summary>Show Environment Plan</summary> - - \`\`\`\n - ${process.env.PLAN_ENV} - \`\`\` - - </details>
\n`; - - github.rest.issues.createComment({ - issue_number: context.issue.number, - owner: context.repo.owner, - repo: context.repo.repo, - body: output - }) - - - name: Terraform Plan Status - if: steps.plan-ecr.outcome == 'failure' || steps.plan-env.outcome == 'failure' - run: exit 1 diff --git a/.gitignore b/.gitignore index efdc0f4..90d6b34 100644 --- a/.gitignore +++ b/.gitignore @@ -5,10 +5,6 @@ node_modules .pnp .pnp.js -# Kamal -.kamal/ssh_key -.kamal/secrets - .aider* # Local env files @@ -33,7 +29,6 @@ out/ build dist - # Debug npm-debug.log* yarn-debug.log* diff --git a/.kamal/secrets b/.kamal/secrets index 1b9c035..06d3745 100644 --- a/.kamal/secrets +++ b/.kamal/secrets @@ -1,12 +1,21 @@ # Secrets for Kamal deployment -# These are injected via bin/vault when running bin/kamal # Docker registry credentials -DOCKERHUB_TOKEN=$DOCKERHUB_TOKEN -SSH_PRIVATE_KEY=$SSH_PRIVATE_KEY - -# Py Service Secrets -# TODO: these should be prefixed with PY_ -# I just don't know how to get the secret mapping to work -GOOGLE_CLIENT_ID=$PY_GOOGLE_CLIENT_ID -GOOGLE_CLIENT_SECRET=$PY_GOOGLE_CLIENT_SECRET \ No newline at end of file +DOCKER_HUB_TOKEN=$DOCKER_HUB_TOKEN + +# NOTE (amiller68): in order to make secrets to be visible to your deploy +# scripts, you need to register them here AND make sure they are exported +# by ./bin/kamal +# That is, ./bin/kamal will handle making sure everything required from your configured +# vault is available to the deployment environment, kamal just requires you to also export them +# here for anything that can't or shouldn't be represented as plain text + +# Example: +# MY_SECRET=$MY_SECRET + +# ts/next +QUOTIENT_PRIVATE_API_KEY=$QUOTIENT_PRIVATE_API_KEY + +# py +GOOGLE_O_AUTH_CLIENT_ID=$GOOGLE_O_AUTH_CLIENT_ID +GOOGLE_O_AUTH_CLIENT_SECRET=$GOOGLE_O_AUTH_CLIENT_SECRET diff --git a/.ruby-version b/.ruby-version deleted file mode 100644 index 15a2799..0000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -3.3.0 diff --git a/Makefile b/Makefile index 21a5034..ee95a56 100644 --- a/Makefile +++ b/Makefile @@ -1,21 +1,22 @@ ARGS ?= +# TODO: Add more projects as needed here +PROJECTS := py ts + .PHONY: help help: ## Show this help message @echo 'Usage: make [target]' @echo '' - @echo 'Available targets:' - @echo ' check: Check all projects' - @echo ' check-%: Check the given project' - @echo ' install-%: Install dependencies for the given project' - @echo ' build-%: Build the given project' - @echo ' test-%: Run tests for the given project' - @echo ' lint-%: Run linters for the given project' - @echo ' fmt-%: Format the given project' - @echo ' fmt-check-%: Check formatting for the given project' - @echo ' types-%: Run type checking for the given project' - @echo ' docker-build-%: Build the given project' - @echo ' clean-%: Clean the given project' + @echo 'Run-all targets (operate on all projects):' + @awk 'BEGIN {FS = ":.*##"} /^[a-zA-Z_-]+:.*?##/ && !/^[a-zA-Z_-]+-%:/ { printf " %-20s %s\n", $$1, $$2 }' $(MAKEFILE_LIST) + @echo '' + @echo 'Project-specific targets (use with - suffix, e.g., -py, -ts):' + @awk 'BEGIN {FS = ":.*##"} /^[a-zA-Z_-]+-%:.*?##/ { gsub(/-%/, "-", $$1); printf " %-20s %s\n", $$1, $$2 }' $(MAKEFILE_LIST) + @echo '' + @echo 'Available projects:' + @for project in $(PROJECTS); do \ + echo " - $$project"; \ + done # run a make command in the given directory run-for: @@ -32,49 +33,150 @@ run-for: fi .PHONY: check -check: check-py check-ts +check: ## Check all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=check; 
\ + done .PHONY: check-% check-%: ## Check the given project @$(MAKE) run-for PROJECT=$(@:check-%=%) CMD=check +.PHONY: install +install: ## Install dependencies for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=install; \ + done + .PHONY: install-% install-%: ## Install dependencies for the given project @$(MAKE) run-for PROJECT=$(@:install-%=%) CMD=install +.PHONY: dev +dev: ## Run all development servers in tmux (use ARGS="--kill" to kill session) + @./bin/dev $(ARGS) + .PHONY: dev-% dev-%: ## Run development server for the given project @$(MAKE) run-for PROJECT=$(@:dev-%=%) CMD=dev +.PHONY: build +build: ## Build all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=build; \ + done + .PHONY: build-% build-%: ## Build the given project @$(MAKE) run-for PROJECT=$(@:build-%=%) CMD=build +.PHONY: test +test: ## Run tests for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=test; \ + done + .PHONY: test-% test-%: ## Run tests for the given project @$(MAKE) run-for PROJECT=$(@:test-%=%) CMD=test +.PHONY: lint +lint: ## Run linters for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=lint; \ + done + .PHONY: lint-% lint-%: ## Run linters for the given project @$(MAKE) run-for PROJECT=$(@:lint-%=%) CMD=lint +.PHONY: fmt +fmt: ## Format all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=fmt; \ + done + .PHONY: fmt-% fmt-%: ## Format the given project @$(MAKE) run-for PROJECT=$(@:fmt-%=%) CMD=fmt +.PHONY: fmt-check +fmt-check: ## Check formatting for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=fmt-check; \ + done + .PHONY: fmt-check-% fmt-check-%: ## Check formatting for the given project @$(MAKE) run-for PROJECT=$(@:fmt-check-%=%) CMD=fmt-check +.PHONY: types +types: ## Run type checking for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=types; \ + done + .PHONY: types-% types-%: ## Run type checking for the given project @$(MAKE) run-for PROJECT=$(@:types-%=%) CMD=types +.PHONY: docker-build +docker-build: ## Build Docker images for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=docker-build; \ + done + .PHONY: docker-build-% -docker-build-%: ## Build the given project +docker-build-%: ## Build Docker image for the given project @$(MAKE) run-for PROJECT=$(@:docker-build-%=%) CMD=docker-build +.PHONY: clean +clean: ## Clean all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=clean; \ + done + .PHONY: clean-% clean-%: ## Clean the given project - @$(MAKE) run-for PROJECT=$(@:clean-%=%) CMD=clean \ No newline at end of file + @$(MAKE) run-for PROJECT=$(@:clean-%=%) CMD=clean + +.PHONY: styles +styles: ## Build styles for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=styles; \ + done + +.PHONY: styles-% +styles-%: ## Build styles for the given project + @$(MAKE) run-for PROJECT=$(@:styles-%=%) CMD=styles + +.PHONY: styles-watch +styles-watch: ## Watch styles for all projects + @for project in $(PROJECTS); do \ + $(MAKE) run-for PROJECT=$$project CMD=styles-watch; \ + done + +.PHONY: styles-watch-% +styles-watch-%: ## Watch styles for the given project + @$(MAKE) run-for PROJECT=$(@:styles-watch-%=%) CMD=styles-watch + +# Terraform Cloud management - pass all arguments after 'tfc' to the script 
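# Example invocations (the catch-all `%:` rule at the bottom of this Makefile
# swallows the extra goals, so make does not try to build them as targets):
#   make tfc up
#   make iac production plan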
+.PHONY: tfc +tfc: ## Terraform Cloud management - pass all arguments after 'tfc' to the script + @./bin/tfc $(filter-out $@,$(MAKECMDGOALS)) + + +# Infrastructure management - pass all arguments after 'iac' to the script +.PHONY: iac +iac: ## Infrastructure management - run terraform with vault secrets + @./bin/iac $(filter-out $@,$(MAKECMDGOALS)) + +# Deployment management - pass all arguments after 'deploy' to the kamal script +.PHONY: kamal +kamal: ## Deploy services using Kamal - usage: make deploy ARGS=" " + @./bin/kamal $(ARGS) + +# Catch additional arguments to tfc, ghcr, iac, and deploy commands +%: + @: diff --git a/README.md b/README.md index 99a81d5..a1c9108 100644 --- a/README.md +++ b/README.md @@ -1,108 +1,338 @@ # Generic Fullstack -Take a look at the [deployed demo](https://generic.krondor.org) - -This repository contains a couple different patterns for building full stack applications I've developed over the years, with focuses on: -- type safe and ergonomic implementation patterns -- rapid iteration and full featured CICD -- owning the deployment pipeline -- and a reliance on running applications in containers for portability +An extensible Docker-based deployment framework for rapidly building and deploying different types of applications with TypeScript and Python. -The purpose of it is to track and publish the sum total of experience i have shipping - quick weekend projects and experiments with the additional satisfaction of doing so - with my tools of choice. Note, the described methods herin are: - - **not audited**: use at your own risk - - **not infinitely scalable**: note the lack of lambdas, external service providers, and orchestrators. - These templates are meant to provide a base for a proof of concept that you might extend into a long lived - application. It is up to you to decide on the right deployment surface and implement it. That being - said, monoliths are pretty much enough for lots of projects, and that's exactly what you get here! -- **unfinished** and likely will always be so. Last year I was writing bespoke ansible and managing my SSH - keys in my password manager -- things change! be sure to check back for updates, or feel free to contribute one. +**[Live Demo](https://generic.krondor.org)** -For more info on my deployment posture, take a look at [my dev ops docs](./docs/dev-ops/index.md) +## Philosophy -## Templates +This framework is built on the principle that shipping quickly shouldn't mean sacrificing control. It provides: -### Typescript +- **Type-safe, ergonomic patterns** for both frontend and backend +- **Rapid iteration** with hot-reload dev environments +- **Own your deployment** - no vendor lock-in, deploy to any VM +- **Container-first** - portable, reproducible builds +- **Single source of truth** - all config in `.env.project` and `.env.vault` +- **Extensible architecture** - start with templates, adapt to your needs -**Typescript + pnpm + turbo repo** +**Note**: These templates are designed for rapid prototyping and small-to-medium projects. Not audited for enterprise security, not infinitely scalable (no lambdas, no Kubernetes). Perfect for weekend projects, MVPs, and proof-of-concepts that you can grow into production apps. -After many years banging my head against the typescript ecosystem, I finally found a development - pattern that (more or less) works: `pnpm` for package management and `turbo` for monorepo support. 
+For deployment philosophy and architecture details, see [docs/dev-ops/index.md](./docs/dev-ops/index.md) -This template is great for: -- writing simple static or client driven sites -- one off express services or public APIs -- situtations where you need access to typescript -- or otherwise typescript is your team's core skillset +## What's Included -As is, it comes with: -- a simple single page static vite application -- and an express api app with type safe handlers +### TypeScript Stack (`ts/`) -It could be readily extended to projects such as: -- a crypto app with no backend state -- a quick hackathon project -- or a public portfolio or static blog +**pnpm + Turbo monorepo with Vite and Express** -I would really like to extend it with: -- some sort of auth pattern, either based on: - - client-side crytpographic keys - - a drop in auth proxy -- backend state via either - - a sql dialect - - (I have yet to find a fully featured and type safe sql tool for typescript projects that I would want to maintain here) - - or mongo, which is pretty easy to hack with in typescript +- Vite + React web application with Tailwind CSS +- Express API server with type-safe handlers +- Shared TypeScript configs and types +- Development hot-reload and production builds -### Python +See [ts/README.md](./ts/README.md) for details. -**Python + uv + FastAPI + SqlAchemy + HATEOAS** +### Python Stack (`py/`) +**FastAPI + SQLAlchemy + HATEOAS** -I recently fell in love with [HATEOAS](https://en.wikipedia.org/wiki/HATEOAS) as a development pattern - for apps built on top of REST APIs. I've had alot of fun using tools like [htmx](https://htmx.org/) and [franken ui](https://franken-ui.dev) - for building pretty, responsive full stack applications against a server written in the language of your choice. +- FastAPI server with server-side rendering +- SQLAlchemy ORM with Alembic migrations +- PostgreSQL database (local + production) +- Google OAuth authentication +- htmx + Tailwind CSS for responsive UI -I decided to start protoyping new ideas entirely in Python using this pattern because: -- there are python libraries for pretty much anything -- it gets the latest and greatest in patterns for working with LLMs -- and declaritive execution is handy when it comes to rapid iteration +See [py/README.md](./py/README.md) for details. -At the moment this template includes: -- Handy dev tooling, including hot reloading in response to code changes -- Ready-to-go local postgres for local hacking -- Google OAuth (which is pretty much all you need for a simple product) -- and easy to extend patterns for writing SSR pages and components +### Deployment Framework +**Terraform + Kamal + 1Password + Digital Ocean** -This template is great for: -- all sorts of full stack applications -- quick protoyping +- Infrastructure as Code with Terraform +- Container orchestration with Kamal +- Secrets management with 1Password +- DNS with Cloudflare +- Automated SSL via Let's Encrypt -I would really like to extend it with: -- Ready-to-go local redis + background jobs using Arq -- common sense LLM patterns + maybe a fly wheel implementation against [Tensor Zero](https://www.tensorzero.com/) +## Quick Start -### Rust +### Prerequisites -TODO: -if you peak at [my github repos](https://github.com/amiller68?tab=repositories), you can see I have a few opinions on what makes for a manageable full-stack Rust Web App, largely informed by [my friend sam's work](https://github.com/sstelfox/web-app-template). 
I'll be moving examples of that work here, and it should eventually include am example detailing: -- a full-stack axum + htmx web app -- with OIDC based authorization -- backed by SQLX and [insert-sql-flavor-here] -- and a companion CLI tool template +1. **1Password account** with CLI installed (`op`) +2. **Terraform Cloud account** (free tier) +3. **Digital Ocean account** with API token +4. **Cloudflare account** managing your DNS zone +5. **Docker Hub account** (or GitHub Container Registry) -and maybe some handy patterns for shipping wasm directly to the browser if I get a lil crazy with it. +### Setup Workflow -I don't think this stack is necessarily the best one for quickly protoyping ideas, considering that: -- the Rust ecosystem is still pretty immature, and you won't find the support for the library or framework you need 100% of the time -- type checking, a good generic system, and memory safety are amazing, but sometimes get in the way of moving fast -- good Rust engineers are hard to find, and hard to hire for +Follow these guides in order: -BUT its still my favorite in that: -- Rust is incredibly portable -- The community's documentation for crates is unmatched by any other language I've worked with -- Cargo provides an unbeatable developer experience for working with dependencies and feature gaurds +1. **[Complete Walkthrough](./docs/setup/WALKTHROUGH.md)** - Step-by-step setup guide +2. **[1Password Setup](./docs/setup/ONE_PASSWORD.md)** - Configure vaults and secrets +3. **[Terraform Cloud Setup](./docs/setup/TERRAFORM_CLOUD.md)** - Create org and workspaces +4. **[Infrastructure Setup](./docs/setup/INFRASTRUCTURE.md)** - Deploy container registry and servers +5. **[Kamal Deployment](./docs/deployment/KAMAL.md)** - Deploy your applications +### Development -## TODOs - -- I think pulumi might actually be a better fit for provisioning the scope of infrastructure we define for a project like this. +```bash +# Install dependencies for all projects +make install + +# Run all dev servers in tmux +make dev + +# Run specific project +make dev-py # Python API only +make dev-ts # TypeScript apps only + +# Build styles +make styles + +# Run checks (format, types, tests) +make check +``` + +See [docs/development/LOCAL.md](./docs/development/LOCAL.md) for detailed development workflows. + +## Project Structure + +``` +. 
+├── bin/ # Deployment and infrastructure scripts +│ ├── tfc # Terraform Cloud management +│ ├── iac # Infrastructure as Code (Terraform wrapper) +│ ├── kamal # Deployment with Kamal +│ ├── vault # 1Password secrets access +│ ├── ssh # SSH into servers +│ └── dev # Multi-project tmux dev environment +├── config/ +│ └── deploy/ # Kamal deployment configs per service +├── iac/ +│ ├── modules/ # Reusable Terraform modules +│ └── stages/ # Stage-specific configs (production, staging) +├── py/ # Python FastAPI application +├── ts/ # TypeScript monorepo (Vite, Express) +├── branding/ # Shared brand assets (CSS, icons, fonts) +├── docs/ # Detailed documentation +│ ├── setup/ # Setup guides +│ ├── deployment/ # Deployment guides +│ └── development/ # Development guides +├── .env.project # Project configuration +└── .env.vault # Vault paths for secrets +``` + +## Configuration + +### `.env.project` - Project Settings + +Single source of truth for project-wide configuration: + +```bash +PROJECT_NAME=generic # Must be globally unique (used for TFC org) +DNS_ROOT_ZONE=yourdomain.com # Your Cloudflare domain +SERVICES="py:app,ts-web:" # Services and subdomains +CLOUD_VAULT=cloud-providers # 1Password vault for cloud credentials +``` + +**Important**: `PROJECT_NAME` must be globally unique as it becomes your Terraform Cloud organization name (`${PROJECT_NAME}-org`). + +All infrastructure, deployment, and app configs derive from this file. + +### `.env.vault` - Secrets Paths + +Defines paths to secrets in 1Password: + +```bash +# Cloud provider credentials +TF_TOKEN=op://cloud-providers/TERRAFORM_CLOUD_API_TOKEN/credential +DOCKER_HUB_USERNAME=op://cloud-providers/DOCKER_HUB_LOGIN/username +CLOUDFLARE_API_TOKEN=op://cloud-providers/CLOUDFLARE_DNS_API_TOKEN/credential +DIGITALOCEAN_TOKEN=op://cloud-providers/DO_API_TOKEN/credential + +# Stage-specific app secrets +GOOGLE_O_AUTH_CLIENT_ID=op://${VAULT_SLUG}/GOOGLE_O_AUTH_CLIENT/username +``` + +Where `${VAULT_SLUG}` = `${PROJECT_NAME}-${STAGE}` (e.g., `generic-production`). + +See [docs/setup/ONE_PASSWORD.md](./docs/setup/ONE_PASSWORD.md) for vault structure. 
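As a rough illustration of what the `bin/vault` wrapper does under the hood (the exact flags it uses may differ), these paths can be resolved directly with the 1Password CLI; the `generic-production` slug below is just an example value of `${PROJECT_NAME}-${STAGE}`:

```bash
# Assumes the 1Password CLI (`op`) is installed and signed in.
# Read a cloud credential from the shared vault:
op read "op://cloud-providers/DO_API_TOKEN/credential"

# Read a stage-scoped app secret (VAULT_SLUG = ${PROJECT_NAME}-${STAGE}):
VAULT_SLUG="generic-production"
export GOOGLE_O_AUTH_CLIENT_ID="$(op read "op://${VAULT_SLUG}/GOOGLE_O_AUTH_CLIENT/username")"
```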
+ +## Common Commands + +### Infrastructure Management + +```bash +# Terraform Cloud setup +make tfc up # Create org and workspaces +make tfc status # Check TFC status + +# Infrastructure deployment +make iac + +# Examples: +make iac container-registry init # Setup container registry +make iac container-registry apply # Deploy registry +make iac production init # Initialize production +make iac production plan # Review infrastructure changes +make iac production apply # Deploy production infrastructure +make iac production output # Show outputs (IPs, keys, etc) +``` + +### Application Deployment + +```bash +# Deploy with Kamal +make kamal ARGS=" " + +# Examples: +make kamal ARGS="py production deploy" # Deploy Python API to production +make kamal ARGS="ts-web production deploy" # Deploy web app to production +make kamal ARGS="py production logs" # View Python API logs +make kamal ARGS="py production app exec" # Execute command in container +``` + +### SSH Access + +```bash +# SSH into production server +bin/ssh production + +# SSH as specific user +bin/ssh production ubuntu +``` + +### Development + +```bash +# Multi-project development +make dev # All projects in tmux +make dev ARGS="--kill" # Kill tmux session + +# Single project development +make dev-py # Python only +make dev-ts # TypeScript only + +# Build and checks +make build # Build all projects +make check # Run all checks (format, types, tests) +make styles # Build styles for all projects +``` + +## Architecture + +### Local Development +``` +Developer Machine +├── tmux session (make dev) +│ ├── Python FastAPI (port 8000) +│ ├── TypeScript Vite web (port 5173) +│ ├── TypeScript Express API (port 3001) +│ └── Local PostgreSQL (Docker, port 5432) +└── 1Password vault integration +``` + +### Production Deployment +``` +Internet → Cloudflare DNS → Digital Ocean Droplet + └── Kamal Proxy (Traefik) + ├── Web app (yourdomain.com) + ├── Python API (app.yourdomain.com) + └── PostgreSQL (internal only) +``` + +## Deployment Workflow + +1. **Configure** `.env.project` with your project name and domain +2. **Setup 1Password** vaults with cloud credentials +3. **Create TFC workspaces**: `make tfc up` +4. **Deploy container registry**: `make iac container-registry apply` +5. **Deploy infrastructure**: `make iac production apply` +6. **Bootstrap server**: `make kamal ARGS="py production setup"` (first time only) +7. **Boot accessories**: `make kamal ARGS="py production accessory boot postgres"` (if service has them) +8. **Deploy services**: `make kamal ARGS="py production deploy"` + +Each step is automated and uses secrets from 1Password. No manual credential management. 
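Condensed into a single copy-pasteable sequence (the service names mirror the demo's `SERVICES` list; substitute your own):

```bash
# First-time bootstrap of the production stage, run from the repo root
make tfc up                                               # Terraform Cloud org + workspaces
make iac container-registry init                          # container registry
make iac container-registry apply
make iac production init                                  # droplet, DNS, SSH keys
make iac production apply
make kamal ARGS="py production setup"                     # bootstrap the server (first time only)
make kamal ARGS="py production accessory boot postgres"   # accessories, if the service has them
make kamal ARGS="py production deploy"                    # deploy each service
make kamal ARGS="ts-web production deploy"
```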
+ +## Documentation + +### Setup Guides +- [Complete Walkthrough](./docs/setup/WALKTHROUGH.md) - Step-by-step setup guide +- [1Password Setup](./docs/setup/ONE_PASSWORD.md) - Configure vaults and secrets +- [Terraform Cloud Setup](./docs/setup/TERRAFORM_CLOUD.md) - Create org and workspaces +- [Infrastructure Setup](./docs/setup/INFRASTRUCTURE.md) - Deploy servers and DNS + +### Deployment Guides +- [Kamal Deployment](./docs/deployment/KAMAL.md) - Deploy and manage services + +### Development Guides +- [Local Development](./docs/development/LOCAL.md) - Dev environment setup +- [TypeScript Projects](./ts/README.md) - Vite app, Express API, and monorepo +- [Python Application](./py/README.md) - FastAPI, database, migrations + +### Operations +- [DevOps Philosophy](./docs/dev-ops/index.md) - Architecture and design decisions + +## Troubleshooting + +**"Unable to authenticate"** +- Check 1Password credentials: `bin/vault read DOCKER_HUB_USERNAME` +- Verify vault names match `.env.project` and `.env.vault` + +**"No infrastructure deployed"** +- Run `make iac production apply` first +- Check Terraform Cloud workspaces: `make tfc status` + +**"Container won't start"** +- Check logs: `make kamal ARGS=" production logs"` +- Verify environment variables in 1Password vault + +**"SSH connection refused"** +- Check firewall allows your IP +- Get SSH command: `make iac production output ssh_connect_command` + +See individual docs for more troubleshooting. + +## Extending the Framework + +This framework is designed to be extensible: + +### Adding New Application Stacks + +The TypeScript and Python templates demonstrate two different patterns. You can: +- Add new language stacks (Rust, Go, etc.) +- Mix and match components +- Customize deployment configs + +### Planned Extensions + +**Rust Stack** (Planned): +- Axum + htmx web app +- OIDC authorization +- SQLX database integration +- Companion CLI tool + +**Python Enhancements**: +- Redis + background jobs using Arq +- LLM patterns with Tensor Zero + +**TypeScript Enhancements**: +- Auth patterns (OAuth, crypto keys) +- SQL integration for backend state + +## Cost Estimate + +Running this stack costs approximately: +- **Digital Ocean Droplet** (s-1vcpu-1gb): $6/month +- **Cloudflare DNS**: Free +- **Terraform Cloud**: Free (up to 500 resources) +- **1Password**: Varies by plan + +**Total minimum**: ~$6-15/month depending on your 1Password plan. + +## Contributing + +This framework is under active development. Expect frequent changes and improvements. Check back for updates or contribute your own! + +## License + +Use at your own risk. Not audited, not infinitely scalable, not finished. Perfect for learning and rapid prototyping. 
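To make the "Adding New Application Stacks" notes above concrete, the rough flow for wiring a new service into this setup looks like the sketch below (the `go-api` name, its subdomain, and the contents of its Kamal config are placeholders, not part of this repo):

```bash
# 1. Add a Kamal deploy config whose name matches the SERVICES entry
touch config/deploy/go-api.yml        # fill in image, servers, proxy, env, etc.

# 2. Register the service and its subdomain in .env.project, e.g.
#    SERVICES="static:generic,py:py.generic,ts-web:ts.generic,go-api:api.generic"

# 3. Re-apply infrastructure so DNS records exist for the new hostname, then deploy
make iac production apply
make kamal ARGS="go-api production setup"
make kamal ARGS="go-api production deploy"
```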
diff --git a/assets/styles/animations.css b/assets/styles/animations.css deleted file mode 100644 index 8d13335..0000000 --- a/assets/styles/animations.css +++ /dev/null @@ -1,40 +0,0 @@ -/* Simplified animations - keeping only the cool ones */ - -@keyframes fade-in { - from { opacity: 0; } - to { opacity: 1; } -} - -@keyframes slide-up { - from { - opacity: 0; - transform: translateY(10px); - } - to { - opacity: 1; - transform: translateY(0); - } -} - -@keyframes spin { - from { transform: rotate(0deg); } - to { transform: rotate(360deg); } -} - -/* Utility classes */ -.animate-fade-in { - animation: fade-in 0.3s ease-out; -} - -.animate-slide-up { - animation: slide-up 0.4s ease-out; -} - -.animate-spin { - animation: spin 1s linear infinite; -} - -/* Loading states */ -.loading { - @apply animate-spin rounded-full h-8 w-8 border-2 border-[var(--muted)] border-t-primary; -} \ No newline at end of file diff --git a/assets/styles/theme.css b/assets/styles/theme.css deleted file mode 100644 index 4669404..0000000 --- a/assets/styles/theme.css +++ /dev/null @@ -1,67 +0,0 @@ -/* Shared theme variables for both Python and TypeScript projects */ - -/* Light mode defaults - BLACK AND WHITE TEST */ -:root { - --background: 0 0% 100%; /* Pure white */ - --foreground: 0 0% 0%; /* Pure black */ - - --muted: 0 0% 95%; /* Very light grey */ - --muted-foreground: 0 0% 40%; /* Dark grey */ - - --card: 0 0% 100%; /* White */ - --card-foreground: 0 0% 0%; /* Black */ - - --popover: 0 0% 100%; /* White */ - --popover-foreground: 0 0% 0%; /* Black */ - - --border: 0 0% 85%; /* Light grey border */ - --input: 0 0% 85%; /* Light grey border */ - - --primary: 0 0% 0%; /* Black primary */ - --primary-foreground: 0 0% 100%; /* White text on black */ - - --secondary: 0 0% 90%; /* Light grey */ - --secondary-foreground: 0 0% 0%; /* Black text */ - - --accent: 0 0% 70%; /* Medium grey accent */ - --accent-foreground: 0 0% 0%; /* Black text */ - - --destructive: 0 0% 20%; /* Dark grey for errors */ - --destructive-foreground: 0 0% 100%; /* White text */ - - --ring: 0 0% 50%; /* Medium grey focus ring */ - - --radius: 0.5rem; -} - -/* Dark mode - INVERTED BLACK AND WHITE */ -.dark { - --background: 0 0% 0%; /* Pure black */ - --foreground: 0 0% 100%; /* Pure white */ - - --muted: 0 0% 10%; /* Very dark grey */ - --muted-foreground: 0 0% 60%; /* Light grey */ - - --card: 0 0% 0%; /* Black */ - --card-foreground: 0 0% 100%; /* White */ - - --popover: 0 0% 0%; /* Black */ - --popover-foreground: 0 0% 100%; /* White */ - - --border: 0 0% 20%; /* Dark grey border */ - --input: 0 0% 20%; /* Dark grey border */ - - --primary: 0 0% 100%; /* White primary */ - --primary-foreground: 0 0% 0%; /* Black text on white */ - - --secondary: 0 0% 15%; /* Dark grey */ - --secondary-foreground: 0 0% 100%; /* White text */ - - --accent: 0 0% 30%; /* Dark grey accent */ - --accent-foreground: 0 0% 100%; /* White text */ - - --destructive: 0 0% 80%; /* Light grey for errors */ - --destructive-foreground: 0 0% 0%; /* Black text */ - - --ring: 0 0% 50%; /* Medium grey focus ring */ -} \ No newline at end of file diff --git a/bin/config b/bin/config new file mode 100755 index 0000000..2e41fc5 --- /dev/null +++ b/bin/config @@ -0,0 +1,329 @@ +#!/usr/bin/env bash + +# NOTE (amiller68): assumes that utils are already +# sourced by the calling script + +# error if project root is not set +if [ -z "$PROJECT_ROOT" ]; then + echo -e "${RED}Error: PROJECT_ROOT not set.${NC}" + exit 1 +fi + +# Function to source project configuration with validation 
+source_project_config() { + if [ -f "$PROJECT_ROOT/.env.project" ]; then + source "$PROJECT_ROOT/.env.project" + else + echo -e "${RED}Error: .env.project not found. Please create it with PROJECT_NAME and DEFAULT_STAGE.${NC}" + return 1 + fi + if [ ! -f "$PROJECT_ROOT/.env.vault" ]; then + echo -e "${RED}Error: .env.vault not found. Please create it with your OP vault items.${NC}" + return 1 + fi + + # Validate required variables + local required_vars=("PROJECT_NAME" "CLOUD_VAULT" "SERVICES" "DNS_ROOT_ZONE" "USE_PRIVATE_REPOS") + local missing_vars=() + + for var in "${required_vars[@]}"; do + if [ -z "${!var}" ]; then + missing_vars+=("$var") + fi + done + + if [ ${#missing_vars[@]} -gt 0 ]; then + echo -e "${RED}Error: Missing required variables in .env.project:${NC}" + for var in "${missing_vars[@]}"; do + echo -e "${RED} - $var${NC}" + done + return 1 + fi + + return 0 +} + + +services() { + # check if services directory exists + if [ ! -d "$PROJECT_ROOT/config/deploy" ]; then + echo -e "${RED}Error: config/deploy directory not found.${NC}" + return 1 + fi + + # pull up a comma separated list of services we have deploy scripts for + local kamal_services=$(ls "$PROJECT_ROOT/config/deploy" | sed 's/.yml//' | paste -sd, -) + if [ -z "$kamal_services" ]; then + echo -e "${RED}Error: No services found'?" + echo -e "${RED}Searched in: $PROJECT_ROOT/config/deploy" + exit 1 + fi + + # parse the domains we configure for our services from the .env.project file, + # they should be a comma separated list under the SERVICES variable by now, + # where each elem is : + # domain may be empty for at most one service + # domains may not conflict with each other + + # just get the names for the services, splitting off the domain + local project_service_names=$(echo "$SERVICES" | tr ',' '\n' | cut -d':' -f1) + + # validate that kamal services align with project services -- + # ensure that all kamal services are in the project services list + for service in $(echo "$kamal_services" | tr ',' ' '); do + if ! echo "$project_service_names" | grep -q "^${service}$"; then + echo -e "${RED}Error: Service '$service' in config/deploy not found in .env.project" + echo -e "${RED}Kamal services: $kamal_services" + echo -e "${RED}Project services: $project_service_names" + exit 1 + fi + done + + # Build space-separated list of service:subdomain pairs + local service_list="" + for service in $(echo "$kamal_services" | tr ',' ' '); do + # Find the subdomain for this service from SERVICES variable + local subdomain=$(echo "$SERVICES" | tr ',' '\n' | grep "^${service}:" | cut -d':' -f2) + service_list+="${service}:${subdomain} " + done + + # Return space-separated list (trim trailing space) + echo "${service_list% }" +} + +# List just the service names (without subdomains) +# Usage: list_service_names +# Example: list_service_names -> "next py" +list_service_names() { + local service_pairs=$(services) + local names="" + + for service_pair in $service_pairs; do + local service_name="${service_pair%%:*}" + names+="$service_name " + done + + # Return space-separated list (trim trailing space) + echo "${names% }" +} + +# List and validate stages +stages() { + # check if stages directory exists + if [ ! 
-d "$PROJECT_ROOT/iac/stages" ]; then + echo -e "${RED}Error: iac/stages directory not found.${NC}" + return 1 + fi + + # pull up a comma separated list of stages we have in iac/stages + local iac_stages=$(ls "$PROJECT_ROOT/iac/stages" 2>/dev/null | paste -sd, -) + if [ -z "$iac_stages" ]; then + echo -e "${RED}Error: No stages found${NC}" + echo -e "${RED}Searched in: $PROJECT_ROOT/iac/stages${NC}" + exit 1 + fi + + # make sure that 'development' is not in the list of stages + if echo "$iac_stages" | grep -q "development"; then + echo -e "${RED}Error: 'development' stage is not allowed. There should be no reason to deploy development${NC}" + exit 1 + fi + + # Return space-separated list of stages + echo "$iac_stages" | tr ',' ' ' +} + +# Get the hostname for a service in a given stage +# Usage: get_service_hostname +# Example: get_service_hostname next production -> smols.org +# Example: get_service_hostname py development -> app.development.smols.org +get_service_hostname() { + local service="$1" + local stage="$2" + + if [ -z "$service" ] || [ -z "$stage" ]; then + echo -e "${RED}Error: get_service_hostname requires service and stage arguments${NC}" >&2 + return 1 + fi + + # Find the service:subdomain pair from services() output + local service_pairs=$(services) + local subdomain="" + + for service_pair in $service_pairs; do + local service_name="${service_pair%%:*}" + if [ "$service_name" = "$service" ]; then + subdomain="${service_pair#*:}" + break + fi + done + + # Start with just the root domain + local hostname="${DNS_ROOT_ZONE}" + + # If the subdomain is not empty, then add the subdomain to the host name + if [[ -n "$subdomain" ]]; then + hostname="${subdomain}.${hostname}" + fi + + # If the stage is not production, then add the stage to the host name + if [[ "$stage" != "production" ]]; then + hostname="${stage}.${hostname}" + fi + + echo "$hostname" +} + +# List all hostnames for a given stage +# Usage: list_hostnames +# Example: list_hostnames production +# Output: smols.org app.smols.org +list_hostnames() { + local stage="$1" + + if [ -z "$stage" ]; then + echo -e "${RED}Error: list_hostnames requires stage argument${NC}" >&2 + return 1 + fi + + local service_pairs=$(services) + local hostnames="" + + for service_pair in $service_pairs; do + local service_name="${service_pair%%:*}" + local hostname=$(get_service_hostname "$service_name" "$stage") + hostnames+="$hostname " + done + + # Return space-separated list (trim trailing space) + echo "${hostnames% }" +} + +# List all subdomains for a given stage (for DNS record creation) +# Usage: list_subdomains +# Example: list_subdomains production +# Output: @ app (where @ represents root domain) +list_subdomains() { + local stage="$1" + + if [ -z "$stage" ]; then + echo -e "${RED}Error: list_subdomains requires stage argument${NC}" >&2 + return 1 + fi + + local service_pairs=$(services) + local subdomains="" + + for service_pair in $service_pairs; do + local service_name="${service_pair%%:*}" + local subdomain="${service_pair#*:}" + + # If subdomain is empty, use @ for root + if [ -z "$subdomain" ]; then + subdomain="@" + fi + + # If stage is not production, prepend stage to subdomain + if [[ "$stage" != "production" ]]; then + if [ "$subdomain" = "@" ]; then + subdomain="$stage" + else + subdomain="${stage}.${subdomain}" + fi + fi + + subdomains+="$subdomain " + done + + # Return space-separated list (trim trailing space) + echo "${subdomains% }" +} + +# Export service hostnames as environment variables for a given stage +# Usage: 
export_service_hostnames +# Example: export_service_hostnames production +# Sets: NEXT_HOST_NAME=smols.org PY_HOST_NAME=app.smols.org +export_service_hostnames() { + local stage="${1}" + # if stage is not set, fail loudly + if [[ -z "$stage" ]]; then + echo -e "${RED}Error: Stage not set" + exit 1 + fi + + # Get all services + local service_pairs=$(services) + + for service_pair in $service_pairs; do + local service_name="${service_pair%%:*}" + local hostname=$(get_service_hostname "$service_name" "$stage") + + # Convert service name to uppercase and replace dashes with underscores + local var_name=$(echo "${service_name}" | tr '[:lower:]' '[:upper:]' | tr '-' '_') + var_name="${var_name}_HOST_NAME" + + # Export the variable + export "${var_name}=${hostname}" + done +} + +source_project_config + +# TODO (amiller68): this does not give a good error message +# Now validate services after config is loaded +if ! services > /dev/null; then + exit 1 +fi + +# Check if script is being sourced or executed directly +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + source $PROJECT_ROOT/bin/utils + # Script is being executed directly - print configuration summary + print_header "Project Configuration" + + echo "" + print_info "Project:" + echo " Name: $PROJECT_NAME" + echo " DNS Root Zone: $DNS_ROOT_ZONE" + echo " Cloud Vault: $CLOUD_VAULT" + + echo "" + print_info "Services:" + for service_pair in $(services); do + service_name="${service_pair%%:*}" + subdomain="${service_pair#*:}" + if [ -z "$subdomain" ]; then + echo " - $service_name (root domain)" + else + echo " - $service_name (subdomain: $subdomain)" + fi + done + + echo "" + print_info "Stages:" + for stage in $(stages); do + echo " - $stage" + done + + echo "" + print_info "Production Hostnames:" + for service_pair in $(services); do + service_name="${service_pair%%:*}" + hostname=$(get_service_hostname "$service_name" "production") + echo " - $service_name: $hostname" + done + + echo "" + echo -e "${GRAY}Note: This script is meant to be sourced by other scripts.${NC}" + echo -e "${GRAY}Available functions: services, stages, get_service_hostname, list_hostnames, list_subdomains, etc.${NC}" +else + # source the project config at the top (but skip service validation to avoid circular dependency) + source_project_config + + # Now validate services after config is loaded + if ! services > /dev/null; then + exit 1 + fi + + export STAGES=$(stages) +fi diff --git a/bin/deploy b/bin/deploy new file mode 100755 index 0000000..4d6ea2f --- /dev/null +++ b/bin/deploy @@ -0,0 +1,176 @@ +#!/usr/bin/env bash +# Script to deploy multiple services in parallel using tmux + +set -o errexit +set -o nounset + +# Points back to the project root +export PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." 
&& pwd )" +# Source utils and config +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" + +# Configuration +SESSION_NAME="krondor-deploy" + +# Parse arguments +SELECTED_SERVICES=() +STAGE="" +KILL_SESSION=false +DEPLOY_COMMAND="deploy" + +# Get available services from config +ALL_SERVICES=($(list_service_names)) + +# Function to show usage +usage() { + local service_names=$(list_service_names) + + echo -e "${YELLOW}Parallel Deployment Tool${NC}" + echo "" + echo "Usage: $0 [options]" + echo "" + echo "Arguments:" + echo " Stage to deploy to (${STAGES})" + echo "" + echo "Options:" + for service in ${ALL_SERVICES[@]}; do + echo " --${service} Deploy only ${service} service" + done + echo " -c, --command Kamal command to run (default: deploy)" + echo " -k, --kill Kill existing deploy session" + echo " -h, --help Show this help message" + echo "" + echo "Examples:" + echo " $0 production Deploy all services to production" + echo " $0 production --py Deploy only py to production" + echo " $0 production --command logs Tail logs for all services" + echo " $0 --kill Kill the deploy tmux session" + exit 0 +} + +while [[ $# -gt 0 ]]; do + case $1 in + --kill|-k) + KILL_SESSION=true + shift + ;; + --command|-c) + DEPLOY_COMMAND="$2" + shift 2 + ;; + --help|-h) + usage + ;; + --*) + # Check if it matches a service name + service_name="${1#--}" + if printf '%s\n' "${ALL_SERVICES[@]}" | grep -q "^${service_name}$"; then + SELECTED_SERVICES+=("$service_name") + shift + else + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + fi + ;; + *) + # First positional argument is the stage + if [ -z "$STAGE" ]; then + STAGE="$1" + shift + else + echo "Unknown argument: $1" + echo "Use --help for usage information" + exit 1 + fi + ;; + esac +done + +# If no services selected, use all +if [ ${#SELECTED_SERVICES[@]} -eq 0 ]; then + SELECTED_SERVICES=("${ALL_SERVICES[@]}") +fi + +# Kill existing session if requested +if [ "$KILL_SESSION" = true ]; then + if tmux has-session -t "$SESSION_NAME" 2>/dev/null; then + print_info "Killing existing tmux session: $SESSION_NAME" + tmux kill-session -t "$SESSION_NAME" + print_success "Session killed" + else + print_warning "No session named '$SESSION_NAME' found" + fi + exit 0 +fi + +# Validate stage is provided +if [ -z "$STAGE" ]; then + print_error "Stage is required" + echo "Use --help for usage information" + exit 1 +fi + +# Validate stage exists +VALID_STAGES=($(stages)) +if ! printf '%s\n' "${VALID_STAGES[@]}" | grep -q "^${STAGE}$"; then + print_error "Invalid stage: $STAGE" + echo "Available stages: ${VALID_STAGES[*]}" + exit 1 +fi + +# Don't allow container-registry stage +if [[ "$STAGE" == "container-registry" ]]; then + print_error "Container registry stage not supported for deployment" + exit 1 +fi + +# Check if tmux is installed +if ! command -v tmux &> /dev/null; then + print_error "tmux is not installed. Install it with: brew install tmux" + exit 1 +fi + +# Kill existing session if it exists +if tmux has-session -t "$SESSION_NAME" 2>/dev/null; then + print_warning "Existing session '$SESSION_NAME' found. Killing it..." + tmux kill-session -t "$SESSION_NAME" +fi + +print_info "Running Kamal '$DEPLOY_COMMAND' on stage '$STAGE' in tmux session: $SESSION_NAME" +print_info "Services: ${SELECTED_SERVICES[*]}" +echo "" + +# Create new session with first service +FIRST_SERVICE="${SELECTED_SERVICES[0]}" +print_info "Creating tmux session with $FIRST_SERVICE..." 
+tmux new-session -d -s "$SESSION_NAME" -n "$FIRST_SERVICE" -c "$PROJECT_ROOT" +tmux send-keys -t "$SESSION_NAME:0" "bin/kamal $FIRST_SERVICE $STAGE $DEPLOY_COMMAND; exit" C-m + +# Add remaining services as new panes +for i in "${!SELECTED_SERVICES[@]}"; do + if [ $i -eq 0 ]; then + continue # Skip first service (already created) + fi + + SERVICE="${SELECTED_SERVICES[$i]}" + print_info "Adding pane for $SERVICE..." + + # Split the window + tmux split-window -t "$SESSION_NAME" -c "$PROJECT_ROOT" + + # Send the deploy command (exit after completion) + tmux send-keys -t "$SESSION_NAME" "bin/kamal $SERVICE $STAGE $DEPLOY_COMMAND; exit" C-m + + # Tile the panes evenly + tmux select-layout -t "$SESSION_NAME" tiled +done + +echo "" +print_success "Parallel deployment started in tmux session" +print_info "Attach with: tmux attach -t $SESSION_NAME" +print_info "Kill with: bin/deploy --kill" + +# Attach to the session +tmux attach -t "$SESSION_NAME" diff --git a/bin/dev b/bin/dev new file mode 100755 index 0000000..d22dd21 --- /dev/null +++ b/bin/dev @@ -0,0 +1,121 @@ +#!/usr/bin/env bash +# Script to run development servers for multiple projects in tmux + +set -o errexit +set -o nounset + +# Points back to the project root +export PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )" +# Source utils for logging +source "$PROJECT_ROOT/bin/utils" + +# Configuration +SESSION_NAME="krondor-dev" +PROJECTS=("py" "ts") + +# Parse arguments +SELECTED_PROJECTS=() +KILL_SESSION=false + +while [[ $# -gt 0 ]]; do + case $1 in + --kill|-k) + KILL_SESSION=true + shift + ;; + --py) + SELECTED_PROJECTS+=("py") + shift + ;; + --ts) + SELECTED_PROJECTS+=("ts") + shift + ;; + --help|-h) + echo "Usage: $0 [options]" + echo "" + echo "Run development servers for projects in tmux panes" + echo "" + echo "Options:" + echo " --py Run only Python dev server" + echo " --ts Run only TypeScript dev server" + echo " -k, --kill Kill existing dev session" + echo " -h, --help Show this help message" + echo "" + echo "Examples:" + echo " $0 Start all dev servers in tmux" + echo " $0 --py Start only Python dev server" + echo " $0 --py --ts Start both servers (same as no args)" + echo " $0 --kill Kill the dev tmux session" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# If no projects selected, use all +if [ ${#SELECTED_PROJECTS[@]} -eq 0 ]; then + SELECTED_PROJECTS=("${PROJECTS[@]}") +fi + +# Kill existing session if requested +if [ "$KILL_SESSION" = true ]; then + if tmux has-session -t "$SESSION_NAME" 2>/dev/null; then + print_info "Killing existing tmux session: $SESSION_NAME" + tmux kill-session -t "$SESSION_NAME" + print_success "Session killed" + else + print_warning "No session named '$SESSION_NAME' found" + fi + exit 0 +fi + +# Check if tmux is installed +if ! command -v tmux &> /dev/null; then + print_error "tmux is not installed. Install it with: brew install tmux" + exit 1 +fi + +# Kill existing session if it exists +if tmux has-session -t "$SESSION_NAME" 2>/dev/null; then + print_warning "Existing session '$SESSION_NAME' found. Killing it..." + tmux kill-session -t "$SESSION_NAME" +fi + +print_info "Starting development servers in tmux session: $SESSION_NAME" + +# Create new session with first project +FIRST_PROJECT="${SELECTED_PROJECTS[0]}" +print_info "Creating tmux session with $FIRST_PROJECT..." 
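+# Each project gets its own pane rooted at its subdirectory, where `make dev`
+# drives that project's dev server.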
+tmux new-session -d -s "$SESSION_NAME" -n "$FIRST_PROJECT" -c "$PROJECT_ROOT/$FIRST_PROJECT" +tmux send-keys -t "$SESSION_NAME:0" "make dev; exit" C-m + +# Add remaining projects as new panes +for i in "${!SELECTED_PROJECTS[@]}"; do + if [ $i -eq 0 ]; then + continue # Skip first project (already created) + fi + + PROJECT="${SELECTED_PROJECTS[$i]}" + print_info "Adding pane for $PROJECT..." + + # Split the window + tmux split-window -t "$SESSION_NAME" -c "$PROJECT_ROOT/$PROJECT" + + # Send the dev command (exit after completion) + tmux send-keys -t "$SESSION_NAME" "make dev; exit" C-m + + # Tile the panes evenly + tmux select-layout -t "$SESSION_NAME" tiled +done + +print_success "Dev servers started in tmux session" +print_info "Attach with: tmux attach -t $SESSION_NAME" +print_info "Kill with: $0 --kill" + +# Attach to the session +tmux attach -t "$SESSION_NAME" diff --git a/bin/iac b/bin/iac new file mode 100755 index 0000000..dd3c798 --- /dev/null +++ b/bin/iac @@ -0,0 +1,77 @@ +#!/bin/bash + +# iac - Infrastructure as Code Management Script +# Runs terraform commands with 1Password vault secret injection + +set -e + +# Get the directory where this script is located +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )" +IAC_ROOT="$PROJECT_ROOT/iac" + +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" +source "$PROJECT_ROOT/bin/vault" + +# Function to show usage +usage() { + echo -e "${YELLOW}Infrastructure as Code Management${NC}" + echo "" + echo "Usage: $0 [args]" + echo "" + echo "Where is one of the following:" + echo " ${STAGES}" + echo "" + echo "And is any valid Terraform command" + exit 1 +} + +# Check arguments +if [[ $# -lt 2 ]]; then + usage +fi + +STAGE="$1" + +# Validate and map stage +if ! is_in_list "$STAGES" "$STAGE"; then + print_error "Unknown stage: $STAGE" + echo "Available stages: $STAGES" + exit 1 +fi + +shift + +STAGE_DIR="$IAC_ROOT/stages/$STAGE" + +if [ ! -d "$STAGE_DIR" ]; then + print_error "Stage directory not found: $STAGE_DIR. This should not happen." + echo "Searched in: $IAC_ROOT/stages/$STAGE" + exit 1 +fi + +# TODO (amiller68): it would be really nice if we could +# just inherit the environment from the project config and +# relevant vault params without having to manually export them +# here. +# That is a project for another day. +# For now, if terraform needs a variable, you must export it here +# as a proper TF_VAR_ variable. 
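+#
+# A minimal sketch of the pattern (the variable name below is hypothetical,
+# not one the current stages declare): if a stage added
+#   variable "droplet_region" {}
+# it would need a matching export here before the terraform run below, e.g.
+#   export TF_VAR_droplet_region="${DROPLET_REGION:-nyc3}"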
+# Export common variables for all stages +# For container-registry stage, export services we want to create repositories for +export TF_VAR_services="${SERVICES}" # determind by the services in the config/deploy directory +# For common stage, for cloudflare to know the root zone in order to create DNS records +# for all services +export TF_VAR_project_name="${PROJECT_NAME}" # specified in .env.project +export TF_VAR_docker_hub_username=$(read_from_vault DOCKER_HUB_USERNAME) +export TF_VAR_docker_hub_password=$(read_from_vault DOCKER_HUB_PASSWORD) +# NOTE (amiller68): you should only really turn this on if you have a pro +# account to enable multiple private repositories +export TF_VAR_use_private_repos=${USE_PRIVATE_REPOS:-false} +export TF_VAR_dns_root_zone="${DNS_ROOT_ZONE}" +# Export subdomains for Cloudflare DNS module +# Convert space-separated list to comma-separated for Terraform +SUBDOMAINS=$(list_subdomains "$STAGE" | tr ' ' ',') +export TF_VAR_subdomains="${SUBDOMAINS}" + +run_with_vault -- sh -c "export TF_TOKEN_app_terraform_io=\$TF_TOKEN && cd '$STAGE_DIR' && terraform $*" diff --git a/bin/kamal b/bin/kamal index 1fae3c4..c41d16f 100755 --- a/bin/kamal +++ b/bin/kamal @@ -1,107 +1,106 @@ -#!/usr/bin/env bash +#!/bin/bash + +# kamal - Simplified deployment script using Kamal with automatic configuration +# Automatically loads secrets from 1Password and infrastructure outputs from Terraform + set -euo pipefail -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -REPO_ROOT="$(dirname "$SCRIPT_DIR")" +PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" +source "$PROJECT_ROOT/bin/vault" # Function to show usage usage() { - echo "Run Kamal commands with Terraform-sourced credentials" - echo "" - echo "Usage: $0 [args]" - echo "" - echo "Stage: production" - echo "Services: py, ts-api, ts-web, static" - echo "" - echo "Examples:" - echo " $0 production ts-web deploy" - echo " $0 production ts-web logs" - echo " $0 production ts-web rollback" - exit 1 + local service_names=$(list_service_names) + + echo -e "${YELLOW}Kamal Deployment Tool${NC}" + echo "" + echo "Usage: $0 [options]" + echo "" + echo " Where is one of the following:" + echo " ${service_names}" + echo " And is one of the following:" + echo " ${STAGES}" + echo " And is any valid Kamal command" + echo "" + echo "This will run the corresponding Kamal command for the service" + echo " defined in config/deploy/.yml for the stage" + echo "Examples:" + echo " $0 next production deploy # Deploy Next.js app" + echo " $0 py production logs # View Python API logs" + echo "" + echo "Environment:" + echo " Stage: ${STAGE:-production} (set with STAGE env var)" + exit 1 } # Check arguments if [[ $# -lt 3 ]]; then - usage + usage fi -STAGE="$1" -SERVICE="$2" +SERVICE="$1" +STAGE="$2" shift 2 -# Validate stage -case "$STAGE" in - production) - ;; - *) - echo "Error: Unknown stage: $STAGE" - echo "Available stages: production" +# Validate and map service +if ! is_in_list "$SERVICES" "$SERVICE"; then + print_error "Unknown service: $SERVICE" + echo "Available services: $SERVICES" exit 1 - ;; -esac +fi -# Validate service -case "$SERVICE" in - py|ts-web|static) - ;; - *) - echo "Error: Unknown service: $SERVICE" - echo "Available services: py, ts-web, static" +# Validate and map stage +if ! 
is_in_list "$STAGES" "$STAGE"; then + print_error "Unknown stage: $STAGE" + echo "Available stages: $STAGES" exit 1 - ;; -esac - -CONFIG_FILE="$REPO_ROOT/config/deploy/${SERVICE}.yml" +fi +# if the stage is for the container-registry, then +# throw an error +if [[ "$STAGE" == "container-registry" ]]; then + print_error "Container registry stage not supported for Kamal" + exit 1 +fi +CONFIG_FILE="$PROJECT_ROOT/config/deploy/${SERVICE}.yml" # Check if config exists if [[ ! -f "$CONFIG_FILE" ]]; then - echo "Error: Config file not found: $CONFIG_FILE" - exit 1 + print_error "Config file not found: $CONFIG_FILE. This should not happen." + exit 1 fi -# Get infrastructure outputs from Terraform -DROPLET_IP=$(cd "$REPO_ROOT" && ./bin/tf "$STAGE" output -raw digitalocean_droplet_ip) -SSH_PRIVATE_KEY=$(cd "$REPO_ROOT" && ./bin/tf "$STAGE" output digitalocean_ssh_private_key | sed -n '3,13p') +# NOTE (amiller68): these are the default +# values we support for each stage, and should +# be supported without additional configuration +# in all of our kamal deployments. +# They should be re-usable for all services across +# a given stage. +export SERVER_IP=$("$PROJECT_ROOT/bin/iac" "$STAGE" output -raw server_ip 2>/dev/null | tail -n 1 || echo "") +export SSH_PRIVATE_KEY=$("$PROJECT_ROOT/bin/iac" "$STAGE" output -raw ssh_private_key 2>/dev/null | tail -n -13 || echo "") +export DOCKER_HUB_USERNAME=$("$PROJECT_ROOT/bin/vault" read DOCKER_HUB_USERNAME) +export PROJECT_NAME=$PROJECT_NAME +# write the ssh private key to a temp file with proper permissions +SSH_PRIVATE_KEY_FILE=$(mktemp) +echo "$SSH_PRIVATE_KEY" > "$SSH_PRIVATE_KEY_FILE" +chmod 600 "$SSH_PRIVATE_KEY_FILE" +export SSH_PRIVATE_KEY_FILE +# make it so that the temp file is *ALWAYS* removed even if the script exits abnormally +trap 'print_info "Cleaning up SSH private key file" && rm "$SSH_PRIVATE_KEY_FILE"' EXIT -# Get Docker registry credentials from container-registry environment -REGISTRY_ENDPOINT="" -REGISTRY_NAME="" -DOCKERHUB_USERNAME="" -if [[ -d "$REPO_ROOT/iac/envs/container-registry" ]]; then - REGISTRY_INFO=$(cd "$REPO_ROOT" && ./bin/tf container-registry output -json registry 2>/dev/null || echo '{}') - REGISTRY_ENDPOINT=$(echo "$REGISTRY_INFO" | jq -r '.endpoint' 2>/dev/null || echo '') - REGISTRY_NAME=$(echo "$REGISTRY_INFO" | jq -r '.name' 2>/dev/null || echo '') - DOCKER_CREDS=$(cd "$REPO_ROOT" && ./bin/tf container-registry output -raw registry_credentials 2>/dev/null || echo '') - - if [[ -n "$DOCKER_CREDS" ]]; then - # Decode the base64 auth to get username and password - AUTH_STRING=$(echo "$DOCKER_CREDS" | jq -r '.auths."registry.digitalocean.com".auth' 2>/dev/null || echo "") - if [[ -n "$AUTH_STRING" ]]; then - DECODED_AUTH=$(echo "$AUTH_STRING" | base64 -d) - DOCKERHUB_USERNAME=$(echo "$DECODED_AUTH" | cut -d: -f1) - DOCKERHUB_TOKEN=$(echo "$DECODED_AUTH" | cut -d: -f2) - fi - fi -fi - -# TODO: make this more generic -# If the service is py, export the Google client ID and secret -# Source them from hashicorp vault using the generic-py-production vault app -if [[ "$SERVICE" == "py" ]]; then - export VAULT_APP="generic-py-$STAGE" - source "$REPO_ROOT/bin/vault" - export PY_GOOGLE_CLIENT_ID=$(read_from_vault GOOGLE_CLIENT_ID) - export PY_GOOGLE_CLIENT_SECRET=$(read_from_vault GOOGLE_CLIENT_SECRET) -fi +# Determine the hostname for the service given the service name and stage +HOST_NAME=$(get_service_hostname "$SERVICE" "$STAGE") +export HOST_NAME +# also export hostnames for other services in the same stage 
+export_service_hostnames "$STAGE" -# Export for kamal config -export DROPLET_IP -export REGISTRY_ENDPOINT -export REGISTRY_NAME -export DOCKERHUB_USERNAME -export DOCKERHUB_TOKEN -export SSH_PRIVATE_KEY +print_info "Running Kamal for $SERVICE on $STAGE" +echo "-> Using config file: $CONFIG_FILE" +echo "-> Target hostname: $HOST_NAME" +echo "-> Host: $SERVER_IP" -# Run kamal -cd "$REPO_ROOT" && kamal "$@" -c "$CONFIG_FILE" \ No newline at end of file +# run kamal, injecting our vault - note that we don't quote the kamal args +# so they get properly expanded in the run_with_vault context +run_with_vault --stage "$STAGE" -- kamal "$@" -c "$CONFIG_FILE" diff --git a/bin/ssh b/bin/ssh new file mode 100755 index 0000000..f18ff30 --- /dev/null +++ b/bin/ssh @@ -0,0 +1,87 @@ +#!/bin/bash + +# ssh - SSH into infrastructure servers +# Automatically loads SSH keys from Terraform outputs + +set -euo pipefail + +PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" + +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" +source "$PROJECT_ROOT/bin/vault" + +# Function to show usage +usage() { + echo -e "${YELLOW}SSH Connection Tool${NC}" + echo "" + echo "Usage: $0 [user]" + echo "" + echo " Where is one of the following:" + echo " ${STAGES}" + echo " And [user] is optional (defaults to 'root')" + echo "" + echo "This will SSH into the server for the given stage" + echo " using the SSH key from Terraform outputs" + echo "" + echo "Examples:" + echo " $0 production # SSH as root to production server" + echo " $0 production ubuntu # SSH as ubuntu to production server" + echo "" + echo "Environment:" + echo " Stage: ${STAGE:-production} (set with STAGE env var)" + exit 1 +} + +# Check arguments +if [[ $# -lt 1 ]]; then + usage +fi + +STAGE="$1" +USER="${2:-root}" + +# Validate stage +if ! is_in_list "$STAGES" "$STAGE"; then + print_error "Unknown stage: $STAGE" + echo "Available stages: $STAGES" + exit 1 +fi + +# Container registry stage not supported +if [[ "$STAGE" == "container-registry" ]]; then + print_error "Container registry stage does not have servers to SSH into" + exit 1 +fi + +# Get server IP and SSH key from Terraform +print_info "Fetching server information for $STAGE..." 
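+# Pull the server IP and SSH key from Terraform outputs via bin/iac; a failed
+# lookup falls through to an empty string and is reported below.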
+SERVER_IP=$("$PROJECT_ROOT/bin/iac" "$STAGE" output -raw server_ip 2>/dev/null | tail -n 1 || echo "") +SSH_PRIVATE_KEY=$("$PROJECT_ROOT/bin/iac" "$STAGE" output -raw ssh_private_key 2>/dev/null | tail -n -13 || echo "") + +if [[ -z "$SERVER_IP" ]]; then + print_error "Could not determine server IP for stage: $STAGE" + exit 1 +fi + +if [[ -z "$SSH_PRIVATE_KEY" ]]; then + print_error "Could not retrieve SSH private key for stage: $STAGE" + exit 1 +fi + +# Write SSH private key to temp file +SSH_PRIVATE_KEY_FILE=$(mktemp) +echo "$SSH_PRIVATE_KEY" > "$SSH_PRIVATE_KEY_FILE" +chmod 600 "$SSH_PRIVATE_KEY_FILE" + +# Always clean up the temp file +trap 'print_info "Cleaning up SSH private key file" && rm -f "$SSH_PRIVATE_KEY_FILE"' EXIT + +print_info "Connecting to $USER@$SERVER_IP ($STAGE)" + +# SSH into the server +ssh -i "$SSH_PRIVATE_KEY_FILE" \ + -o StrictHostKeyChecking=no \ + -o UserKnownHostsFile=/dev/null \ + -o LogLevel=ERROR \ + "$USER@$SERVER_IP" diff --git a/bin/tf b/bin/tf deleted file mode 100755 index 28b2131..0000000 --- a/bin/tf +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -set -e - -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -DEFAULT_TF_VAULT_APP=generic-tf -TF_VAULT_APP=${TF_VAULT_APP:-$DEFAULT_TF_VAULT_APP} -REPO_ROOT="$(dirname "$SCRIPT_DIR")" -IAC_ROOT="$REPO_ROOT/iac" - -# Function to show usage -usage() { - echo "Run terraform commands with HCP Vault secret injection" - echo "" - echo "Usage: $0 [args]" - echo "" - echo "Environment variables:" - echo " TF_VAULT_APP - HCP Vault app to use (default: $DEFAULT_TF_VAULT_APP)" - echo "" - echo "Examples:" - echo " $0 production plan" - echo " TF_VAULT_APP=my-tf-app $0 registry apply" - echo " $0 production destroy" - exit 1 -} - -# Check arguments -if [[ $# -lt 2 ]]; then - usage -fi - -ENV_NAME="$1" -shift - -# Set environment directory -ENV_DIR="$IAC_ROOT/envs/$ENV_NAME" - -# Check if environment directory exists -if [[ ! -d "$ENV_DIR" ]]; then - echo "Error: Environment directory not found: $ENV_DIR" - usage -fi - -# Run terraform using the vault script -source "$REPO_ROOT/bin/vault" -VAULT_APP="$TF_VAULT_APP" -# TODO: remove lock=false -run_with_vault sh -c "echo $DIGITALOCEAN_TOKEN && cd '$ENV_DIR' && terraform $*" \ No newline at end of file diff --git a/bin/tfc b/bin/tfc new file mode 100755 index 0000000..fb61cfd --- /dev/null +++ b/bin/tfc @@ -0,0 +1,265 @@ +#!/bin/bash + +# tfc - Terraform Cloud Management Script +# Manages Terraform Cloud organizations and workspaces for multi-stage deployments + +set -e + +# Get the directory where this script is located +export PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )" + +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" +source "$PROJECT_ROOT/bin/vault" + +# helper to get the right workspace name +# for a given stage +get_workspace_name() { + local stage="$1" + echo "${PROJECT_NAME}-${stage}" +} + +# helper to read out tf cloud token from +# our cloud provider vault +load_tf_token() { + local tf_token="" + tf_token=$(read_from_vault TF_TOKEN) + if [ -z "$tf_token" ]; then + print_error "Terraform Cloud token not found" + print_info "Add TERRAFORM_CLOUD_API_TOKEN to your ${CLOUD_VAULT} vault" + return 1 + fi + echo "$tf_token" +} + +if ! command -v terraform &> /dev/null; then + print_error "Terraform CLI is not installed. Please install it first." 
+ echo "Visit: https://developer.hashicorp.com/terraform/downloads" + return 1 +fi + +# Export Terraform configuration +export TF_ORGANIZATION="${PROJECT_NAME}-org" +# Build a list of workspaces to create +WORKSPACES=() +for stage in $STAGES; do + WORKSPACES+=("$(get_workspace_name "$stage")") +done +export WORKSPACES + +# Function to create Terraform Cloud organization +function create_org() { + print_header "Creating Terraform Cloud Organization" + + local tf_token=$(load_tf_token) + + print_info "Creating organization '$TF_ORGANIZATION'..." + + local response=$(curl -s -w "\n%{http_code}" \ + -H "Authorization: Bearer $tf_token" \ + -H "Content-Type: application/vnd.api+json" \ + -X POST \ + -d "{ + \"data\": { + \"type\": \"organizations\", + \"attributes\": { + \"name\": \"$TF_ORGANIZATION\", + \"email\": \"admin@$TF_ORGANIZATION.local\" + } + } + }" \ + "https://app.terraform.io/api/v2/organizations") + + local http_code=$(echo "$response" | tail -n1) + local response_body=$(echo "$response" | sed '$d') + + if [ "$http_code" = "201" ]; then + print_success "Organization '$TF_ORGANIZATION' is ready" + return 0 + else + print_error "Failed to create organization (HTTP $http_code)" + echo "Response: $response_body" + return 1 + fi +} + +# Internal function to create workspace (not exposed as command) +function create_workspace() { + local stage="$1" + workspace_name=$(get_workspace_name "$stage") + + print_header "Creating Terraform Workspace: $workspace_name" + + local tf_token=$(load_tf_token) + + print_info "Creating workspace '$workspace_name'..." + + local response=$(curl -s -w "\n%{http_code}" \ + -H "Authorization: Bearer $tf_token" \ + -H "Content-Type: application/vnd.api+json" \ + -X POST \ + -d "{ + \"data\": { + \"type\": \"workspaces\", + \"attributes\": { + \"name\": \"$workspace_name\", + \"execution-mode\": \"local\", + \"auto-apply\": false, + \"description\": \"Workspace for $stage environment\", + \"global-remote-state\": true + } + } + }" \ + "https://app.terraform.io/api/v2/organizations/$TF_ORGANIZATION/workspaces") + + local http_code=$(echo "$response" | tail -n1) + + if [ "$http_code" = "201" ] || [ "$http_code" = "422" ]; then + print_success "Workspace '$workspace_name' is ready" + # Update backend config in iac/stages + update_backend_config "$stage" + return 0 + else + print_error "Failed to create workspace (HTTP $http_code)" + return 1 + fi +} + +# Function to update backend configuration in iac/stages +function update_backend_config() { + local stage="$1" + local workspace_name=$(get_workspace_name "$stage") + + # Check if stage directory exists + local stage_dir="$PROJECT_ROOT/iac/stages/$stage" + + # if the stage dir does not exist, log a warning and return + # since we should never have checked for it then. + # We should only have checked for it if we have a stage. + if [ ! -d "$stage_dir" ]; then + print_warning "Stage directory '$stage_dir' not found, skipping backend config" + return 0 + fi + + # if the terraform.tf file does not exist, create it + # using the safe defaults + if [ ! 
-f "$stage_dir/terraform.tf" ]; then + print_info "Terraform.tf file not found, creating it for $stage" + mkdir -p "$stage_dir" + cat > "$stage_dir/terraform.tf" << EOF +terraform { + cloud { + organization = "$TF_ORGANIZATION" + workspaces { + name = "$workspace_name" + } + } +} +EOF + print_success "Backend configuration created at $stage_dir/terraform.tf" + return 0 + fi +} + +# Main init function +function up() { + print_header "Applying Terraform Cloud configuration:" + print_info " -> Project name: $PROJECT_NAME" + print_info " -> Organization: $TF_ORGANIZATION" + print_info " -> Target workspaces: ${WORKSPACES[*]}" + echo "" + + # Check authentication + local tf_token=$(load_tf_token) + + # Create Terraform Cloud organization + create_org || return 1 + + # Create workspaces for non-dev stages + echo $STAGES + for stage in $STAGES; do + create_workspace "$stage" || return 1 + done + + print_summary "Initialization completed successfully!" "initialization failed" + + if [ $ERRORS -eq 0 ]; then + echo "" + print_header "Next Steps" + echo "1. Set your project to use CLI execution mode" + echo " See docs for more info" + echo "2. Initialize Terraform for your stage:" + echo " make iac init" + fi +} + +# Function to show status +function status() { + print_header "Terraform Cloud Status" + + # Check Terraform Cloud + local tf_token=$(load_tf_token) + + # Check organization + local org_check=$(curl -s -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer $tf_token" \ + "https://app.terraform.io/api/v2/organizations/$TF_ORGANIZATION") + + if [ "$org_check" = "200" ]; then + print_success "Organization '$TF_ORGANIZATION' exists" + + # List workspaces + echo "" + print_header "Checking workspaces" + for workspace_name in "${WORKSPACES[@]}"; do + local ws_check=$(curl -s -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer $tf_token" \ + "https://app.terraform.io/api/v2/organizations/$TF_ORGANIZATION/workspaces/$workspace_name") + + if [ "$ws_check" = "200" ]; then + print_success " $workspace_name exists" + else + print_warning " $workspace_name not found" + fi + done + else + print_warning "Organization '$TF_ORGANIZATION' not found" + fi +} + +# Function to show help +function help() { + echo -e "${YELLOW}Terraform Cloud Management${NC}" + echo "" + echo "Usage: $0 [command]" + echo "" + echo "Commands:" + echo " up - Ensure Terraform Cloud workspaces are up set up for the declared stages" + echo " status - Show current status" + echo " help - Show this help message" + echo "" + echo "Configuration:" + echo " Workspaces are created based on the stages in the iac/stages directory" + echo " declared stages: ${STAGES[*]}" +} + +# Process command +CMD=${1:-help} +shift || true + +case "$CMD" in + up) + up + ;; + status) + status + ;; + help) + help + ;; + *) + echo -e "${RED}Unknown command: $CMD${NC}" + help + exit 1 + ;; +esac diff --git a/py/bin/utils.sh b/bin/utils similarity index 55% rename from py/bin/utils.sh rename to bin/utils index 3743a81..41125c2 100755 --- a/py/bin/utils.sh +++ b/bin/utils @@ -1,19 +1,25 @@ #!/usr/bin/env bash +# error if project root is not set +if [ -z "$PROJECT_ROOT" ]; then + echo -e "${RED}Error: PROJECT_ROOT not set.${NC}" + exit 1 +fi + # Shared utilities for bin scripts # Colors for output export RED='\033[0;31m' export GREEN='\033[0;32m' export YELLOW='\033[1;33m' +export BLUE='\033[0;34m' +export CYAN='\033[0;36m' +export GRAY='\033[0;90m' export NC='\033[0m' # No Color # Error counter export ERRORS=0 -# Get the project root (parent of bin 
directory) -export PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" - # Function to print section headers print_header() { echo -e "\n${YELLOW}=== $1 ===${NC}" @@ -33,7 +39,7 @@ check_result() { print_summary() { local success_msg="$1" local failure_msg="${2:-check(s) failed}" - + print_header "Summary" if [ $ERRORS -eq 0 ]; then echo -e "${GREEN}${success_msg}${NC}" @@ -44,7 +50,32 @@ print_summary() { fi } -# Export functions for use in other scripts -export -f print_header -export -f check_result -export -f print_summary \ No newline at end of file +print_error() { + echo -e "${RED}✗ $1${NC}" +} + +print_warning() { + echo -e "${YELLOW}⚠ $1${NC}" +} + +print_success() { + echo -e "${GREEN}✓ $1${NC}" +} + +print_info() { + echo -e "${BLUE}ℹ $1${NC}" +} + +is_in_list() { + local list="$1" + local item="$2" + echo "$list" | grep -q "$item" +} + +# export -f print_header +# export -f check_result +# export -f print_summary +# export -f print_error +# export -f print_warning +# export -f print_success +# export -f print_info diff --git a/bin/vault b/bin/vault index c3f6ece..799061f 100755 --- a/bin/vault +++ b/bin/vault @@ -1,24 +1,209 @@ #!/bin/bash -# Get the directory where this script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." && pwd )" +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/.." && pwd )" -# Source the HCP env file if it exists -if [ -f "$PROJECT_ROOT/.env.hcp" ]; then - source "$PROJECT_ROOT/.env.hcp" -fi +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" -# Source the vault app env file if it exists (for py specific configs) -if [ -f "$PROJECT_ROOT/py/.env.vault" ]; then - source "$PROJECT_ROOT/py/.env.vault" -fi +# our default is just the development vault +DEFAULT_STAGE="development" + +# Validate that the vaults we expect exist in 1Password +# - We should have - for every deployment described in ./iac/stages +# - We should have a -development vault for storing the development environment +# - We should have out vault for storing the cloud provider environment +validate_vaults() { + local all_stages=$STAGES + local all_valid=true + + # TODO (amiller68): this is kinda messy, but it works for now + # validate the deployed stages -- make an exception for container-registry + # though + for stage in $all_stages; do + if [[ "$stage" == "container-registry" ]]; then + continue + fi + local vault_name="${PROJECT_NAME}-${stage}" + if ! op vault list --format=json 2>/dev/null | jq -e ".[] | select(.name == +\"$vault_name\")" > /dev/null 2>&1; then + echo "Error: Vault '$vault_name' not found in 1Password" >&2 + all_valid=false + fi + done + + # validate the development stage + local development_vault="${PROJECT_NAME}-development" + if ! op vault list --format=json 2>/dev/null | jq -e ".[] | select(.name == +\"$development_vault\")" > /dev/null 2>&1; then + echo "Error: Vault '$development_vault' not found in 1Password" >&2 + all_valid=false + fi + + # Validate cloud provider vault + local cloud_provider_vault="${CLOUD_VAULT}" + if ! 
op vault list --format=json 2>/dev/null | jq -e ".[] | select(.name == +\"$cloud_provider_vault\")" > /dev/null 2>&1; then + echo "Error: Vault '$cloud_provider_vault' not found in 1Password" >&2 + all_valid=false + fi + + [ "$all_valid" = true ] +} +validate_vaults + +# validate that we can access one password, +# by first checking if we are in a ci environment, +# checking for a valid service account env if so +# and then trying to access a op +validate_op_auth() { + op account get > /dev/null 2>&1 +} + +# validate_vaults +validate_op_auth + +# Function to run commands with vault run_with_vault() { - hcp vault-secrets run --project=${VAULT_PROJECT_ID} --app=${VAULT_APP} -- "$@" + local _STAGE_ARG="" + local RUN_ARGS=() + + # Parse arguments + while [[ $# -gt 0 ]]; do + case $1 in + --stage) + _STAGE_ARG=$2 + shift 2 + ;; + --) + shift + RUN_ARGS=("$@") + break + ;; + *) + RUN_ARGS=("$@") + break + ;; + esac + done + + # TODO (amiller68): i am not the biggest fan of this, + # but it's fine for now. We should have a way to *just* load + # cloud credentials + STAGE="${_STAGE_ARG:-$DEFAULT_STAGE}" + + # Validate command + if [ ${#RUN_ARGS[@]} -eq 0 ]; then + echo "Error: No command specified to run" + return 1 + fi + + # Export environment variables and run + export STAGE=$STAGE + export PROJECT_NAME=$PROJECT_NAME + export CLOUD_VAULT=$CLOUD_VAULT + + op run --env-file="$PROJECT_ROOT/.env.vault" --no-masking -- "${RUN_ARGS[@]}" } +# Function to read a single vault value read_from_vault() { - ret=$(hcp vault-secrets secrets open --project=${VAULT_PROJECT_ID} --app=${VAULT_APP} -- "$@") - echo "$ret" | grep "Value:" | awk '{print $2}' -} \ No newline at end of file + local KEY="$1" + local STAGE_ARG="" + + # Parse stage if provided + if [[ "$1" == "--stage" ]]; then + STAGE_ARG="$2" + KEY="$3" + fi + + # Set stage + local CURRENT_STAGE=${STAGE_ARG} + + if [ -z "$KEY" ]; then + echo "Error: Please specify a key to read from vault" >&2 + echo "Usage: $0 read [--stage ] " >&2 + echo "Available keys in .env.vault:" >&2 + grep "^[A-Z_]*=" "$PROJECT_ROOT/.env.vault" | cut -d'=' -f1 | sed 's/^/ - /' >&2 + return 1 + fi + + # Export stage and project name for variable substitution + export STAGE=$CURRENT_STAGE + export PROJECT_NAME=$PROJECT_NAME + export VAULT_SLUG="${PROJECT_NAME}-${STAGE}" + + # Look for the key in .env.vault + local VALUE=$(grep "^${KEY}=" "$PROJECT_ROOT/.env.vault" | cut -d'=' -f2-) + + if [ -z "$VALUE" ]; then + echo "Error: Key '$KEY' not found in .env.vault" >&2 + return 1 + fi + + # check if the value has a STAGE variable in it, if so we should + # raise an error if we were not given a stage + # e.g. + # GOOGLE_O_AUTH_CLIENT_ID=op://${VAULT_SLUG}/GOOGLE_O_AUTH_CLIENT/username + # Should not be read without a stage, and you can't just check the vault slug since + # it will match whatever we just exported ^^. + if [[ "$VALUE" == *"${VAULT_SLUG}"* ]]; then + echo "Error: Key '$KEY' contains a STAGE variable, but no stage was provided" >&2 + return 1 + fi + + # Substitute variables in the value + VALUE=$(eval echo "$VALUE") + + # If it's a 1Password reference, resolve it + if [[ "$VALUE" == op://* ]]; then + op read "$VALUE" 2>/dev/null || { + echo "Error: Failed to read '$VALUE' from 1Password" >&2 + return 1 + } + else + # Return the value as-is + echo "$VALUE" + fi +} + +help() { + echo "Vault - provides nice helpers for reading env" + echo " from one password in line with the setup we expect." 
+ echo "" + echo "Usage:" + echo " $0 read [--stage ] - Read a specific value from vault" + echo " $0 write [--stage ] - Write a specific value to vault" + echo "" + echo "Examples:" + echo " $0 read HASHICORP_API_TOKEN" + echo " $0 read --stage production GOOGLE_O_AUTH_CLIENT_ID" + echo " $0 --stage development -- python app.py" +} + +# Check if script is being sourced or executed directly +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + # Script is being executed directly + CMD="${1:-help}" + + case "$CMD" in + read) + shift + read_from_vault "$@" + ;; + run) + shift + run_with_vault "$@" + ;; + help|--help) + help + ;; + *) + help + ;; + esac +else + # Script is being sourced - just define the functions + export -f run_with_vault + export -f read_from_vault +fi diff --git a/branding/README.md b/branding/README.md new file mode 100644 index 0000000..74cacd0 --- /dev/null +++ b/branding/README.md @@ -0,0 +1,323 @@ +# Krondor Platform Branding System + +A unified design system and branding solution that shares visual identity across Python and TypeScript applications. + +## Overview + +This branding system provides: +- **Single source of truth** for design tokens (colors, typography, spacing) +- **Shared Tailwind CSS configuration** as presets +- **CSS variables** for runtime theming (light/dark mode) +- **Animation utilities** for consistent motion design +- **Shared assets** (icons, favicons, logos) + +## Architecture + +``` +branding/ +├── core/ +│ └── tokens.json # Design tokens (source of truth) +├── tailwind/ +│ └── preset.js # Shared Tailwind preset +├── styles/ +│ ├── variables.css # CSS custom properties +│ ├── animations.css # Shared animations +│ └── utilities.css # Utility classes +├── assets/ # Shared assets (icons, logos) +├── package.json # npm package configuration +└── index.js # Main entry point +``` + +## How It Works + +### 1. Design Tokens (`core/tokens.json`) + +Central repository of all design values: +- **Colors**: Base palette and semantic color mappings +- **Typography**: Font families and size scales +- **Spacing**: Consistent spacing units +- **Animations**: Duration and timing functions +- **Border Radius**: Corner radius values + +### 2. Tailwind Preset (`tailwind/preset.js`) + +Exports a Tailwind configuration preset that: +- Maps design tokens to Tailwind utilities +- Defines color variables using CSS custom properties +- Includes keyframe animations +- Provides base theme configuration + +### 3. CSS Files + +**`variables.css`**: Defines CSS custom properties for runtime theming +- Light mode (default): Black and white theme +- Dark mode: Inverted color scheme +- Semantic color mappings (primary, secondary, accent, etc.) + +**`animations.css`**: Reusable animation keyframes and utilities +- Basic: fade-in, slide-up, spin, pulse +- Advanced: blob, float, drift, orbit +- Animation delay utilities + +**`utilities.css`**: Common utility and component classes +- Text utilities (gradients, balance) +- Backdrop effects +- Pre-styled components (cards, buttons, inputs) + +### 4. 
Shared Assets (`assets/`) + +Common branding assets used across all platforms: +- `favicon.ico`, `favicon.png` - Browser favicons +- `icon.svg`, `icon.png` - Application icons + +**Integration**: +- **Next.js**: Symlinked into `ts/apps/next/public/` (committed to git) +- **Python**: Copied to `py/static/` via `make styles` script + +## Setup + +### Install Dependencies + +Install branding package dependencies: +```bash +make branding-setup +# Or directly: +cd branding && npm install +``` + +### Project Integration + +Both Python and TypeScript projects already have branding integration configured. + +## Usage + +### Python Application + +The Python app imports the branding preset and styles: + +**`py/tailwind.config.js`**: +```javascript +const brandingPreset = require('../branding/tailwind/preset.js'); + +module.exports = { + content: ["./templates/**/*.html", "./src/**/*.py"], + presets: [brandingPreset], + theme: { + extend: { + // Python-specific extensions + } + } +} +``` + +**`py/styles/main.css`**: +```css +@tailwind base; +@tailwind components; +@tailwind utilities; + +/* Imports shared branding styles */ +@import '../../branding/styles/variables.css'; +@import '../../branding/styles/animations.css'; +@import '../../branding/styles/utilities.css'; +``` + +**Building styles**: +```bash +cd py +make styles # Build CSS and copy branding assets +make styles-watch # Watch mode +``` + +### TypeScript/Next.js Application + +The TypeScript app imports the branding preset directly: + +**`ts/apps/next/tailwind.config.ts`**: +```typescript +const brandingPreset = require('../../../branding/tailwind/preset.js'); + +const config: Config = { + presets: [brandingPreset], + theme: { + extend: { + // TypeScript-specific extensions + } + } +} +``` + +**`ts/apps/next/app/globals.css`**: +```css +@tailwind base; +@tailwind components; +@tailwind utilities; + +@import '../../../../branding/styles/variables.css'; +@import '../../../../branding/styles/animations.css'; +``` + +**Styles are built automatically** by Next.js during `next dev` or `next build`. + +## Extending the System + +### Adding New Design Tokens + +1. Edit `branding/core/tokens.json`: +```json +{ + "colors": { + "brand": { + "primary": "#000000", + "secondary": "#666666" + } + } +} +``` + +2. Update the Tailwind preset in `branding/tailwind/preset.js` + +3. Rebuild project styles: +```bash +make styles-py # Rebuild Python CSS +make build-ts # Rebuild TypeScript (includes CSS) +``` + +### Adding Project-Specific Styles + +Each project can extend the base theme: + +**Python example**: +```javascript +module.exports = { + presets: [brandingPreset], + theme: { + extend: { + colors: { + 'py-special': '#123456' // Python-only color + } + } + } +} +``` + +**TypeScript example**: +```typescript +const config: Config = { + presets: [brandingPreset], + theme: { + extend: { + animation: { + 'ts-fancy': 'fancy 2s ease-in-out' // TS-only animation + } + } + } +} +``` + +### Switching Themes + +The system uses CSS variables for theming. To switch themes: + +```html + + + + + +``` + +In JavaScript/TypeScript: +```javascript +// Toggle dark mode +document.documentElement.classList.toggle('dark'); +``` + +## Development Workflow + +### Python Development +```bash +cd py +make dev # Starts dev server (builds styles automatically) +``` + +Styles are built before the dev server starts. 
To watch for changes separately: +```bash +make styles-watch +``` + +### TypeScript Development +```bash +cd ts +make dev # Next.js handles CSS automatically +``` + +Next.js watches and rebuilds CSS automatically during development. + +### Updating Shared Styles + +When you update branding styles: + +1. Edit files in `branding/` directory +2. Rebuild project styles: + - **Python**: Run `make styles-py` or restart `make dev-py` + - **TypeScript**: Next.js rebuilds automatically + +## Maintenance + +### Version Control + +- Commit all files in `branding/` directory +- Project-specific Tailwind configs are committed +- Built CSS files (`py/static/css/main.css`) should be built locally or in CI/CD + +### Common Tasks + +**Update branding colors**: +1. Edit `branding/tailwind/preset.js` +2. Run `make styles` in affected projects + +**Add new shared animation**: +1. Edit `branding/styles/animations.css` +2. Changes are picked up automatically (imported by projects) + +**Update shared assets**: +1. Replace files in `branding/assets/` +2. Python: Run `make styles-py` to copy new assets +3. Next.js: Assets are symlinked, changes reflected immediately + +## Benefits + +1. **Consistency**: Same visual language across all platforms +2. **Maintainability**: Single source of truth for design decisions +3. **Scalability**: Easy to add new projects +4. **Flexibility**: Each project can extend base theme +5. **Performance**: Shared utilities reduce CSS duplication +6. **Developer Experience**: Simple integration via imports + +## Troubleshooting + +### CSS not updating in Python app +```bash +# Rebuild CSS +cd py && make styles + +# Clear browser cache and reload +``` + +### TypeScript not finding branding preset +```bash +# Ensure relative path is correct in tailwind.config.ts +const brandingPreset = require('../../../branding/tailwind/preset.js'); +``` + +### Dark mode not working +- Ensure `darkMode: 'class'` is in Tailwind config +- Add `class="dark"` to HTML element +- Check CSS variables are properly imported + +### Assets not loading in Python +```bash +# Rebuild styles to copy assets +cd py && make styles +``` diff --git a/branding/assets/README.md b/branding/assets/README.md new file mode 100644 index 0000000..e2ec402 --- /dev/null +++ b/branding/assets/README.md @@ -0,0 +1,38 @@ +# Shared Brand Assets + +This directory contains all shared logos, icons, and favicons for the platform. + +## Files + +- `favicon.ico` - Standard favicon (16x16, 32x32, 48x48) +- `favicon.png` - PNG version of favicon +- `icon.png` - Application icon (larger format) +- `icon.svg` - Vector version of application icon + +## Usage + +All projects should symlink to these assets rather than maintaining their own copies: + +### Python +```bash +cd py/static +ln -s ../../branding/assets/favicon.ico . +ln -s ../../branding/assets/icon.svg . +``` + +### TypeScript/Next.js +```bash +cd ts/apps/next/public +ln -s ../../../../branding/assets/favicon.ico . +ln -s ../../../../branding/assets/icon.svg . +``` + +## Updating Icons + +To update icons across all projects: +1. Replace the files in this directory +2. All projects using symlinks will automatically get the updated versions + +## Generating Favicons + +When you need different sizes, use the source `icon.svg` or `icon.png` to generate them. 
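+
+A minimal sketch of regenerating them (this assumes ImageMagick 7 with SVG
+support is installed; the repo does not prescribe a specific tool):
+
+```bash
+# Render raster icons and a multi-size favicon from the vector source
+magick icon.svg -resize 512x512 icon.png
+magick icon.svg -resize 64x64 favicon.png
+magick icon.svg -define icon:auto-resize=16,32,48 favicon.ico
+```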
\ No newline at end of file diff --git a/assets/icons/favicon.ico b/branding/assets/favicon.ico similarity index 100% rename from assets/icons/favicon.ico rename to branding/assets/favicon.ico diff --git a/assets/icons/favicon.png b/branding/assets/favicon.png similarity index 100% rename from assets/icons/favicon.png rename to branding/assets/favicon.png diff --git a/assets/icons/icon.png b/branding/assets/icon.png similarity index 100% rename from assets/icons/icon.png rename to branding/assets/icon.png diff --git a/assets/icons/icon.svg b/branding/assets/icon.svg similarity index 100% rename from assets/icons/icon.svg rename to branding/assets/icon.svg diff --git a/branding/core/tokens.json b/branding/core/tokens.json new file mode 100644 index 0000000..11442b3 --- /dev/null +++ b/branding/core/tokens.json @@ -0,0 +1,176 @@ +{ + "colors": { + "base": { + "white": "#FFFFFF", + "black": "#000000", + "gray": { + "50": "hsl(0, 0%, 98%)", + "100": "hsl(0, 0%, 95%)", + "200": "hsl(0, 0%, 90%)", + "300": "hsl(0, 0%, 85%)", + "400": "hsl(0, 0%, 70%)", + "500": "hsl(0, 0%, 50%)", + "600": "hsl(0, 0%, 40%)", + "700": "hsl(0, 0%, 30%)", + "800": "hsl(0, 0%, 20%)", + "850": "hsl(0, 0%, 15%)", + "900": "hsl(0, 0%, 10%)", + "950": "hsl(0, 0%, 5%)" + } + }, + "semantic": { + "light": { + "background": "0 0% 100%", + "foreground": "0 0% 0%", + "muted": { + "DEFAULT": "0 0% 95%", + "foreground": "0 0% 40%" + }, + "card": { + "DEFAULT": "0 0% 100%", + "foreground": "0 0% 0%" + }, + "popover": { + "DEFAULT": "0 0% 100%", + "foreground": "0 0% 0%" + }, + "border": "0 0% 85%", + "input": "0 0% 85%", + "primary": { + "DEFAULT": "0 0% 0%", + "foreground": "0 0% 100%" + }, + "secondary": { + "DEFAULT": "0 0% 90%", + "foreground": "0 0% 0%" + }, + "accent": { + "DEFAULT": "0 0% 70%", + "foreground": "0 0% 0%" + }, + "destructive": { + "DEFAULT": "0 0% 20%", + "foreground": "0 0% 100%" + }, + "ring": "0 0% 50%" + }, + "dark": { + "background": "0 0% 0%", + "foreground": "0 0% 100%", + "muted": { + "DEFAULT": "0 0% 10%", + "foreground": "0 0% 60%" + }, + "card": { + "DEFAULT": "0 0% 0%", + "foreground": "0 0% 100%" + }, + "popover": { + "DEFAULT": "0 0% 0%", + "foreground": "0 0% 100%" + }, + "border": "0 0% 20%", + "input": "0 0% 20%", + "primary": { + "DEFAULT": "0 0% 100%", + "foreground": "0 0% 0%" + }, + "secondary": { + "DEFAULT": "0 0% 15%", + "foreground": "0 0% 100%" + }, + "accent": { + "DEFAULT": "0 0% 30%", + "foreground": "0 0% 100%" + }, + "destructive": { + "DEFAULT": "0 0% 80%", + "foreground": "0 0% 0%" + }, + "ring": "0 0% 50%" + } + } + }, + "typography": { + "fonts": { + "sans": ["system-ui", "-apple-system", "sans-serif"], + "mono": ["ui-monospace", "monospace"] + }, + "sizes": { + "xs": "0.75rem", + "sm": "0.875rem", + "base": "1rem", + "lg": "1.125rem", + "xl": "1.25rem", + "2xl": "1.5rem", + "3xl": "1.875rem", + "4xl": "2.25rem", + "5xl": "3rem", + "6xl": "3.75rem", + "7xl": "4.5rem", + "8xl": "6rem", + "9xl": "8rem" + } + }, + "spacing": { + "0": "0px", + "1": "0.25rem", + "2": "0.5rem", + "3": "0.75rem", + "4": "1rem", + "5": "1.25rem", + "6": "1.5rem", + "7": "1.75rem", + "8": "2rem", + "9": "2.25rem", + "10": "2.5rem", + "11": "2.75rem", + "12": "3rem", + "14": "3.5rem", + "16": "4rem", + "20": "5rem", + "24": "6rem", + "28": "7rem", + "32": "8rem", + "36": "9rem", + "40": "10rem", + "44": "11rem", + "48": "12rem", + "52": "13rem", + "56": "14rem", + "60": "15rem", + "64": "16rem", + "72": "18rem", + "80": "20rem", + "96": "24rem" + }, + "borderRadius": { + "none": "0", + "sm": 
"0.125rem", + "DEFAULT": "0.25rem", + "md": "0.375rem", + "lg": "0.5rem", + "xl": "0.75rem", + "2xl": "1rem", + "3xl": "1.5rem", + "full": "9999px" + }, + "animations": { + "duration": { + "75": "75ms", + "100": "100ms", + "150": "150ms", + "200": "200ms", + "300": "300ms", + "500": "500ms", + "700": "700ms", + "1000": "1000ms" + }, + "timing": { + "ease": "cubic-bezier(0.4, 0, 0.2, 1)", + "in": "cubic-bezier(0.4, 0, 1, 1)", + "out": "cubic-bezier(0, 0, 0.2, 1)", + "in-out": "cubic-bezier(0.4, 0, 0.2, 1)" + } + } +} \ No newline at end of file diff --git a/branding/index.js b/branding/index.js new file mode 100644 index 0000000..6091488 --- /dev/null +++ b/branding/index.js @@ -0,0 +1,47 @@ +// Main entry point for the branding package + +const tokens = require('./core/tokens.json'); +const tailwindPreset = require('./tailwind/preset.js'); + +module.exports = { + tokens, + tailwindPreset, + // Helper to get CSS variable value + getCSSVariable: (name, mode = 'light') => { + const themeTokens = mode === 'dark' ? tokens.colors.semantic.dark : tokens.colors.semantic.light; + + // Navigate nested properties + const keys = name.split('.'); + let value = themeTokens; + for (const key of keys) { + value = value?.[key]; + if (value && typeof value === 'object' && value.DEFAULT) { + value = value.DEFAULT; + } + } + return value; + }, + // Helper to generate CSS variables string + generateCSSVariables: (mode = 'light') => { + const themeTokens = mode === 'dark' ? tokens.colors.semantic.dark : tokens.colors.semantic.light; + let css = ''; + + const processTokens = (obj, prefix = '--') => { + for (const [key, value] of Object.entries(obj)) { + if (typeof value === 'object') { + if (value.DEFAULT) { + css += ` ${prefix}${key}: ${value.DEFAULT};\n`; + css += ` ${prefix}${key}-foreground: ${value.foreground};\n`; + } else { + processTokens(value, `${prefix}${key}-`); + } + } else { + css += ` ${prefix}${key}: ${value};\n`; + } + } + }; + + processTokens(themeTokens); + return css; + } +}; \ No newline at end of file diff --git a/branding/package.json b/branding/package.json new file mode 100644 index 0000000..b0e6d24 --- /dev/null +++ b/branding/package.json @@ -0,0 +1,23 @@ +{ + "name": "@krondor/branding", + "version": "1.0.0", + "description": "Shared branding and design system for Krondor platform", + "main": "index.js", + "exports": { + ".": "./index.js", + "./tailwind-preset": "./tailwind/preset.js", + "./tokens": "./core/tokens.json", + "./styles/*": "./styles/*" + }, + "keywords": [ + "branding", + "design-system", + "tailwind", + "theme" + ], + "author": "", + "license": "ISC", + "peerDependencies": { + "tailwindcss": "^3.0.0" + } +} \ No newline at end of file diff --git a/branding/styles/animations.css b/branding/styles/animations.css new file mode 100644 index 0000000..ba1f1bd --- /dev/null +++ b/branding/styles/animations.css @@ -0,0 +1,199 @@ +/* Shared animations that can be used across projects */ + +/* Basic animations */ +@keyframes fade-in { + from { opacity: 0; } + to { opacity: 1; } +} + +@keyframes slide-up { + from { + opacity: 0; + transform: translateY(10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes slide-down { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes slide-left { + from { + opacity: 0; + transform: translateX(10px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes slide-right { + from { + opacity: 0; + transform: translateX(-10px); + } 
+ to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes spin { + from { transform: rotate(0deg); } + to { transform: rotate(360deg); } +} + +@keyframes pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.5; + } +} + +/* Advanced animations for dramatic effects */ +@keyframes blob { + 0%, 100% { + transform: translate(0, 0) scale(1); + } + 33% { + transform: translate(300px, -300px) scale(1.2); + } + 66% { + transform: translate(-200px, 200px) scale(0.8); + } +} + +@keyframes blob-reverse { + 0%, 100% { + transform: translate(0, 0) scale(1); + } + 33% { + transform: translate(-300px, 200px) scale(1.2); + } + 66% { + transform: translate(200px, -300px) scale(0.8); + } +} + +@keyframes float { + 0%, 100% { + transform: translateY(0px); + } + 50% { + transform: translateY(-200px); + } +} + +@keyframes drift { + 0%, 100% { + transform: translate(0, 0); + } + 25% { + transform: translate(400px, -200px); + } + 50% { + transform: translate(-300px, 300px); + } + 75% { + transform: translate(200px, 150px); + } +} + +@keyframes orbit { + from { + transform: rotate(0deg) translateX(250px) rotate(0deg); + } + to { + transform: rotate(360deg) translateX(250px) rotate(-360deg); + } +} + +/* Utility classes */ +.animate-fade-in { + animation: fade-in 0.3s ease-out; +} + +.animate-slide-up { + animation: slide-up 0.4s ease-out; +} + +.animate-slide-down { + animation: slide-down 0.4s ease-out; +} + +.animate-slide-left { + animation: slide-left 0.4s ease-out; +} + +.animate-slide-right { + animation: slide-right 0.4s ease-out; +} + +.animate-spin { + animation: spin 1s linear infinite; +} + +.animate-pulse { + animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite; +} + +/* Advanced animation utilities */ +.animate-blob { + animation: blob 15s infinite ease-in-out; +} + +.animate-blob-reverse { + animation: blob-reverse 18s infinite ease-in-out; +} + +.animate-float { + animation: float 12s ease-in-out infinite; +} + +.animate-drift { + animation: drift 20s ease-in-out infinite; +} + +.animate-orbit { + animation: orbit 25s linear infinite; +} + +/* Animation delays */ +.animation-delay-200 { + animation-delay: 200ms; +} + +.animation-delay-500 { + animation-delay: 500ms; +} + +.animation-delay-1000 { + animation-delay: 1s; +} + +.animation-delay-2000 { + animation-delay: 2s; +} + +.animation-delay-4000 { + animation-delay: 4s; +} + +/* Loading states */ +.loading { + @apply animate-spin rounded-full h-8 w-8 border-2 border-[var(--muted)] border-t-[var(--primary)]; +} \ No newline at end of file diff --git a/branding/styles/utilities.css b/branding/styles/utilities.css new file mode 100644 index 0000000..20c506e --- /dev/null +++ b/branding/styles/utilities.css @@ -0,0 +1,163 @@ +/* Shared utility classes - Import this AFTER @tailwind directives in your main CSS */ + +@layer utilities { + /* Text utilities */ + .gradient-text { + @apply bg-gradient-to-r from-black to-gray-600 bg-clip-text text-transparent; + } + + .dark .gradient-text { + @apply from-white to-gray-400; + } + + .text-balance { + text-wrap: balance; + } + + /* Scroll utilities */ + .scrollbar-hide { + -ms-overflow-style: none; + scrollbar-width: none; + } + + .scrollbar-hide::-webkit-scrollbar { + display: none; + } + + /* Backdrop utilities */ + .backdrop-blur-xs { + backdrop-filter: blur(2px); + } + + .backdrop-blur-sm { + backdrop-filter: blur(4px); + } + + .backdrop-blur { + backdrop-filter: blur(8px); + } + + .backdrop-blur-md { + backdrop-filter: blur(12px); + } + + .backdrop-blur-lg { + backdrop-filter: 
blur(16px); + } + + .backdrop-blur-xl { + backdrop-filter: blur(24px); + } + + /* Grid utilities */ + .grid-auto-fit { + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + } + + .grid-auto-fill { + grid-template-columns: repeat(auto-fill, minmax(250px, 1fr)); + } + + /* Aspect ratio utilities */ + .aspect-cinema { + aspect-ratio: 2.39 / 1; + } + + .aspect-golden { + aspect-ratio: 1.618 / 1; + } +} + +@layer components { + /* Card component */ + .card { + @apply bg-card border border-border rounded-lg p-6; + } + + .card-hover { + @apply card hover:shadow-lg transition-shadow duration-200; + } + + /* Button components */ + .btn { + @apply px-4 py-2 rounded-md font-medium transition-colors duration-200; + } + + .btn-primary { + @apply bg-primary text-primary-foreground hover:bg-primary/90; + } + + .btn-secondary { + @apply bg-secondary text-secondary-foreground hover:bg-secondary/80; + } + + .btn-ghost { + @apply hover:bg-accent hover:text-accent-foreground; + } + + .btn-outline { + @apply border border-border bg-background hover:bg-accent hover:text-accent-foreground; + } + + .btn-destructive { + @apply bg-destructive text-destructive-foreground hover:bg-destructive/90; + } + + /* Input component */ + .input { + @apply w-full px-3 py-2 bg-background border border-input rounded-md focus:outline-none focus:ring-2 focus:ring-ring focus:border-input transition-colors; + } + + /* Badge component */ + .badge { + @apply inline-flex items-center rounded-full px-2.5 py-0.5 text-xs font-semibold transition-colors; + } + + .badge-primary { + @apply bg-primary text-primary-foreground; + } + + .badge-secondary { + @apply bg-secondary text-secondary-foreground; + } + + .badge-outline { + @apply border border-border; + } + + /* Text utilities */ + .text-muted { + @apply text-muted-foreground; + } + + .text-gradient { + @apply gradient-text; + } + + /* Link component */ + .link { + @apply text-primary underline-offset-4 hover:underline; + } + + /* Divider component */ + .divider { + @apply border-t border-border; + } + + /* Container components */ + .container-sm { + @apply max-w-2xl mx-auto px-4; + } + + .container-md { + @apply max-w-4xl mx-auto px-4; + } + + .container-lg { + @apply max-w-6xl mx-auto px-4; + } + + .container-xl { + @apply max-w-7xl mx-auto px-4; + } +} \ No newline at end of file diff --git a/branding/styles/variables.css b/branding/styles/variables.css new file mode 100644 index 0000000..6a96429 --- /dev/null +++ b/branding/styles/variables.css @@ -0,0 +1,67 @@ +/* Shared CSS Variables - Automatically generated from tokens.json */ + +/* Light mode defaults - BLACK AND WHITE THEME */ +:root { + --background: 0 0% 100%; /* Pure white */ + --foreground: 0 0% 0%; /* Pure black */ + + --muted: 0 0% 95%; /* Very light grey */ + --muted-foreground: 0 0% 40%; /* Dark grey */ + + --card: 0 0% 100%; /* White */ + --card-foreground: 0 0% 0%; /* Black */ + + --popover: 0 0% 100%; /* White */ + --popover-foreground: 0 0% 0%; /* Black */ + + --border: 0 0% 85%; /* Light grey border */ + --input: 0 0% 85%; /* Light grey border */ + + --primary: 0 0% 0%; /* Black primary */ + --primary-foreground: 0 0% 100%; /* White text on black */ + + --secondary: 0 0% 90%; /* Light grey */ + --secondary-foreground: 0 0% 0%; /* Black text */ + + --accent: 0 0% 70%; /* Medium grey accent */ + --accent-foreground: 0 0% 0%; /* Black text */ + + --destructive: 0 0% 20%; /* Dark grey for errors */ + --destructive-foreground: 0 0% 100%; /* White text */ + + --ring: 0 0% 50%; /* Medium grey focus ring */ + + 
--radius: 0.5rem; +} + +/* Dark mode - INVERTED BLACK AND WHITE */ +.dark { + --background: 0 0% 0%; /* Pure black */ + --foreground: 0 0% 100%; /* Pure white */ + + --muted: 0 0% 10%; /* Very dark grey */ + --muted-foreground: 0 0% 60%; /* Light grey */ + + --card: 0 0% 0%; /* Black */ + --card-foreground: 0 0% 100%; /* White */ + + --popover: 0 0% 0%; /* Black */ + --popover-foreground: 0 0% 100%; /* White */ + + --border: 0 0% 20%; /* Dark grey border */ + --input: 0 0% 20%; /* Dark grey border */ + + --primary: 0 0% 100%; /* White primary */ + --primary-foreground: 0 0% 0%; /* Black text on white */ + + --secondary: 0 0% 15%; /* Dark grey */ + --secondary-foreground: 0 0% 100%; /* White text */ + + --accent: 0 0% 30%; /* Dark grey accent */ + --accent-foreground: 0 0% 100%; /* White text */ + + --destructive: 0 0% 80%; /* Light grey for errors */ + --destructive-foreground: 0 0% 0%; /* Black text */ + + --ring: 0 0% 50%; /* Medium grey focus ring */ +} \ No newline at end of file diff --git a/branding/tailwind/preset.js b/branding/tailwind/preset.js new file mode 100644 index 0000000..0bba15c --- /dev/null +++ b/branding/tailwind/preset.js @@ -0,0 +1,140 @@ +const tokens = require('../core/tokens.json'); + +/** @type {import('tailwindcss').Config} */ +module.exports = { + darkMode: 'class', + theme: { + extend: { + colors: { + // Use CSS variables for theming + background: 'hsl(var(--background))', + foreground: 'hsl(var(--foreground))', + muted: { + DEFAULT: 'hsl(var(--muted))', + foreground: 'hsl(var(--muted-foreground))', + }, + card: { + DEFAULT: 'hsl(var(--card))', + foreground: 'hsl(var(--card-foreground))', + }, + popover: { + DEFAULT: 'hsl(var(--popover))', + foreground: 'hsl(var(--popover-foreground))', + }, + border: 'hsl(var(--border))', + input: 'hsl(var(--input))', + primary: { + DEFAULT: 'hsl(var(--primary))', + foreground: 'hsl(var(--primary-foreground))', + }, + secondary: { + DEFAULT: 'hsl(var(--secondary))', + foreground: 'hsl(var(--secondary-foreground))', + }, + accent: { + DEFAULT: 'hsl(var(--accent))', + foreground: 'hsl(var(--accent-foreground))', + }, + destructive: { + DEFAULT: 'hsl(var(--destructive))', + foreground: 'hsl(var(--destructive-foreground))', + }, + ring: 'hsl(var(--ring))', + // Base gray scale for utility + gray: tokens.colors.base.gray + }, + fontFamily: { + sans: tokens.typography.fonts.sans, + mono: tokens.typography.fonts.mono, + }, + fontSize: tokens.typography.sizes, + spacing: tokens.spacing, + borderRadius: tokens.borderRadius, + animation: { + 'fade-in': 'fade-in 0.3s ease-out', + 'slide-up': 'slide-up 0.4s ease-out', + 'spin': 'spin 1s linear infinite', + // Advanced animations for TS projects + 'blob': 'blob 15s infinite ease-in-out', + 'blob-reverse': 'blob-reverse 18s infinite ease-in-out', + 'float': 'float 12s ease-in-out infinite', + 'drift': 'drift 20s ease-in-out infinite', + 'orbit': 'orbit 25s linear infinite', + }, + keyframes: { + 'fade-in': { + from: { opacity: '0' }, + to: { opacity: '1' } + }, + 'slide-up': { + from: { + opacity: '0', + transform: 'translateY(10px)' + }, + to: { + opacity: '1', + transform: 'translateY(0)' + } + }, + 'spin': { + from: { transform: 'rotate(0deg)' }, + to: { transform: 'rotate(360deg)' } + }, + // Advanced animations + 'blob': { + '0%, 100%': { + transform: 'translate(0, 0) scale(1)', + }, + '33%': { + transform: 'translate(300px, -300px) scale(1.2)', + }, + '66%': { + transform: 'translate(-200px, 200px) scale(0.8)', + } + }, + 'blob-reverse': { + '0%, 100%': { + transform: 'translate(0, 
0) scale(1)', + }, + '33%': { + transform: 'translate(-300px, 200px) scale(1.2)', + }, + '66%': { + transform: 'translate(200px, -300px) scale(0.8)', + } + }, + 'float': { + '0%, 100%': { + transform: 'translateY(0px)', + }, + '50%': { + transform: 'translateY(-200px)', + } + }, + 'drift': { + '0%, 100%': { + transform: 'translate(0, 0)', + }, + '25%': { + transform: 'translate(400px, -200px)', + }, + '50%': { + transform: 'translate(-300px, 300px)', + }, + '75%': { + transform: 'translate(200px, 150px)', + } + }, + 'orbit': { + from: { + transform: 'rotate(0deg) translateX(250px) rotate(0deg)', + }, + to: { + transform: 'rotate(360deg) translateX(250px) rotate(-360deg)', + } + } + } + }, + }, + plugins: [], +} \ No newline at end of file diff --git a/config/deploy/py.yml b/config/deploy/py.yml index 86654a0..4b777fa 100644 --- a/config/deploy/py.yml +++ b/config/deploy/py.yml @@ -1,59 +1,66 @@ -# Kamal deploy configuration for TypeScript Web service -service: generic-py +service: <%= ENV['PROJECT_NAME'] %>-py -# NOTE: this must match the name you passed to the container registry module -# Container image -image: "<%= ENV['REGISTRY_NAME'] %>/generic-py" +# NOTE (amiller68): do not change this! +# It is important that this is kept in sync with +# the name of the service as described in the file +# name. +# i.e. if the file name is py.yml, then the service name must be <%= ENV['PROJECT_NAME'] %>-py +# Container image using Docker Hub +image: <%= ENV['DOCKER_HUB_USERNAME'] %>/<%= ENV['PROJECT_NAME'] %>-py # Deploy to these servers servers: web: - - "<%= ENV['DROPLET_IP'].strip %>" + - "<%= ENV['SERVER_IP'].strip %>" -# SSH configuration +# SSH configuration ssh: user: root - key_data: ["<%= ENV['SSH_PRIVATE_KEY'] %>"] + keys: ["<%= ENV['SSH_PRIVATE_KEY_FILE'] %>"] + keys_only: true -# Registry configuration +# Registry configuration for Docker Hub registry: - server: "<%= ENV['REGISTRY_ENDPOINT'] %>" - username: "<%= ENV['DOCKERHUB_USERNAME'] %>" + username: <%= ENV['DOCKER_HUB_USERNAME'] %> password: - - DOCKERHUB_TOKEN + - DOCKER_HUB_TOKEN # Builder configuration builder: dockerfile: py/Dockerfile - context: "py" + context: "." 
arch: amd64 # Proxy configuration for SSL proxy: ssl: true - host: py.generic.krondor.org + host: <%= ENV['HOST_NAME'] %> app_port: 8000 + healthcheck: + path: /up # Environment variables env: clear: - POSTGRES_URL: "postgresql://generic_user:generic_pass@generic-py-postgres:5432/generic_py" - HOST_NAME: https://py.generic.krondor.org - AUTH_REDIRECT_URI: https://py.generic.krondor.org/auth/google/callback + POSTGRES_URL: postgresql://postgres:postgres@<%= ENV['PROJECT_NAME'] %>-py-postgres:5432/postgres + # TODO (amiller68): this env is closer to DOMAIN_NAME in practice, + # since i have to add the scheme here + HOST_NAME: https://<%= ENV['HOST_NAME'] %> + DEBUG: false secret: - - GOOGLE_CLIENT_ID - - GOOGLE_CLIENT_SECRET + - GOOGLE_O_AUTH_CLIENT_ID + - GOOGLE_O_AUTH_CLIENT_SECRET # Accessories (PostgreSQL database - internal only) accessories: postgres: image: postgres:17-alpine - host: "<%= ENV['DROPLET_IP'].strip %>" + host: "<%= ENV['SERVER_IP'].strip %>" port: "5432:5432" env: clear: - POSTGRES_DB: generic_py - POSTGRES_USER: generic_user - POSTGRES_PASSWORD: generic_pass + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres directories: - - data:/var/lib/postgresql/data \ No newline at end of file + - data:/var/lib/postgresql/data diff --git a/config/deploy/static.yml b/config/deploy/static.yml index aceb489..f9a8c79 100644 --- a/config/deploy/static.yml +++ b/config/deploy/static.yml @@ -1,31 +1,31 @@ # Kamal deploy configuration for Static site service -service: generic-static +service: <%= ENV['PROJECT_NAME'] %>-static # NOTE: this must match the name you passed to the container registry module # Container image -image: "<%= ENV['REGISTRY_NAME'] %>/generic-static" +image: <%= ENV['DOCKER_HUB_USERNAME'] %>/<%= ENV['PROJECT_NAME'] %>-static # Deploy to these servers servers: web: - - "<%= ENV['DROPLET_IP'].strip %>" + - "<%= ENV['SERVER_IP'].strip %>" -# SSH configuration +# SSH configuration ssh: user: root - key_data: ["<%= ENV['SSH_PRIVATE_KEY'] %>"] + keys: ["<%= ENV['SSH_PRIVATE_KEY_FILE'] %>"] + keys_only: true -# Registry configuration +# Registry configuration for Docker Hub registry: - server: "<%= ENV['REGISTRY_ENDPOINT'] %>" - username: "<%= ENV['DOCKERHUB_USERNAME'] %>" + username: <%= ENV['DOCKER_HUB_USERNAME'] %> password: - - DOCKERHUB_TOKEN + - DOCKER_HUB_TOKEN # Builder configuration builder: dockerfile: static/Dockerfile - context: "static" + context: "." arch: amd64 cache: type: registry @@ -33,5 +33,7 @@ builder: # Proxy configuration for SSL proxy: ssl: true - host: generic.krondor.org - app_port: 8080 \ No newline at end of file + host: <%= ENV['HOST_NAME'] %> + app_port: 8080 + healthcheck: + path: / diff --git a/config/deploy/ts-web.yml b/config/deploy/ts-web.yml index 6becd3f..4f4189c 100644 --- a/config/deploy/ts-web.yml +++ b/config/deploy/ts-web.yml @@ -1,31 +1,34 @@ -# Kamal deploy configuration for TypeScript Web service -service: generic-ts-web +service: <%= ENV['PROJECT_NAME'] %>-ts-web -# NOTE: this must match the name you passed to the container registry module -# Container image -image: "<%= ENV['REGISTRY_NAME'] %>/generic-ts-web" +# NOTE (amiller68): do not change this! +# It is important that this is kept in sync with +# the name of the service as described in the file +# name. +# i.e. 
if the file name is py.yml, then the service name must be <%= ENV['PROJECT_NAME'] %>-py +# Container image using Docker Hub +image: <%= ENV['DOCKER_HUB_USERNAME'] %>/<%= ENV['PROJECT_NAME'] %>-ts-web # Deploy to these servers servers: web: - - "<%= ENV['DROPLET_IP'].strip %>" + - "<%= ENV['SERVER_IP'].strip %>" -# SSH configuration +# SSH configuration ssh: user: root - key_data: ["<%= ENV['SSH_PRIVATE_KEY'] %>"] + keys: ["<%= ENV['SSH_PRIVATE_KEY_FILE'] %>"] + keys_only: true -# Registry configuration +# Registry configuration for Docker Hub registry: - server: "<%= ENV['REGISTRY_ENDPOINT'] %>" - username: "<%= ENV['DOCKERHUB_USERNAME'] %>" + username: <%= ENV['DOCKER_HUB_USERNAME'] %> password: - - DOCKERHUB_TOKEN + - DOCKER_HUB_TOKEN # Builder configuration builder: dockerfile: ts/apps/web/Dockerfile - context: "ts" + context: "." arch: amd64 cache: type: registry @@ -34,4 +37,6 @@ builder: proxy: ssl: true host: ts.generic.krondor.org - app_port: 3000 \ No newline at end of file + app_port: 3000 + healthcheck: + path: / diff --git a/config/example.yml b/config/example.yml index 89c7a7c..b94bea5 100644 --- a/config/example.yml +++ b/config/example.yml @@ -1,101 +1,39 @@ -# Name of your application. Used to uniquely configure containers. -service: my-app +# Kamal deploy configuration for Static site service +service: <%= ENV['PROJECT_NAME'] %>-app -# Name of the container image. -image: my-user/my-app +# NOTE: this must match the name you passed to the container registry module +# Container image +image: <%= ENV['DOCKER_HUB_USERNAME'] %>/<%= ENV['PROJECT_NAME'] %>-app -# Deploy to these servers. +# Deploy to these servers servers: web: - - 192.168.0.1 - # job: - # hosts: - # - 192.168.0.1 - # cmd: bin/jobs + - "<%= ENV['SERVER_IP'].strip %>" -# Enable SSL auto certification via Let's Encrypt and allow for multiple apps on a single web server. -# Remove this section when using multiple web servers and ensure you terminate SSL at your load balancer. -# -# Note: If using Cloudflare, set encryption mode in SSL/TLS setting to "Full" to enable CF-to-app encryption. -proxy: - ssl: true - host: app.example.com - # Proxy connects to your container on port 80 by default. - # app_port: 3000 +# SSH configuration +ssh: + user: root + keys: ["<%= ENV['SSH_PRIVATE_KEY_FILE'] %>"] + keys_only: true -# Credentials for your image host. +# Registry configuration for Docker Hub registry: - # Specify the registry server, if you're not using Docker Hub - # server: registry.digitalocean.com / ghcr.io / ... - username: my-user - - # Always use an access token rather than real password (pulled from .kamal/secrets). + username: <%= ENV['DOCKER_HUB_USERNAME'] %> password: - - KAMAL_REGISTRY_PASSWORD + - DOCKER_HUB_TOKEN -# Configure builder setup. +# Builder configuration builder: + dockerfile: app/Dockerfile + context: "." arch: amd64 - # Pass in additional build args needed for your Dockerfile. - # args: - # RUBY_VERSION: <%= ENV["RBENV_VERSION"] || ENV["rvm_ruby_string"] || "#{RUBY_ENGINE}-#{RUBY_ENGINE_VERSION}" %> - -# Inject ENV variables into containers (secrets come from .kamal/secrets). -# -# env: -# clear: -# DB_HOST: 192.168.0.2 -# secret: -# - RAILS_MASTER_KEY - -# Aliases are triggered with "bin/kamal ". You can overwrite arguments on invocation: -# "bin/kamal app logs -r job" will tail logs from the first server in the job section. 
-# -# aliases: -# shell: app exec --interactive --reuse "bash" - -# Use a different ssh user than root -# -# ssh: -# user: app + cache: + type: registry -# Use a persistent storage volume. -# -# volumes: -# - "app_storage:/app/storage" - -# Bridge fingerprinted assets, like JS and CSS, between versions to avoid -# hitting 404 on in-flight requests. Combines all files from new and old -# version inside the asset_path. -# -# asset_path: /app/public/assets - -# Configure rolling deploys by setting a wait time between batches of restarts. -# -# boot: -# limit: 10 # Can also specify as a percentage of total hosts, such as "25%" -# wait: 2 - -# Use accessory services (secrets come from .kamal/secrets). -# -# accessories: -# db: -# image: mysql:8.0 -# host: 192.168.0.2 -# port: 3306 -# env: -# clear: -# MYSQL_ROOT_HOST: '%' -# secret: -# - MYSQL_ROOT_PASSWORD -# files: -# - config/mysql/production.cnf:/etc/mysql/my.cnf -# - db/production.sql:/docker-entrypoint-initdb.d/setup.sql -# directories: -# - data:/var/lib/mysql -# redis: -# image: valkey/valkey:8 -# host: 192.168.0.2 -# port: 6379 -# directories: -# - data:/data +# Proxy configuration for SSL +proxy: + ssl: true + host: <%= ENV['HOST_NAME'] %> + app_port: 8080 + healthcheck: + path: / diff --git a/docs/deployment/KAMAL.md b/docs/deployment/KAMAL.md new file mode 100644 index 0000000..0f23010 --- /dev/null +++ b/docs/deployment/KAMAL.md @@ -0,0 +1,474 @@ +# Kamal Deployment Guide + +This guide explains how to deploy applications using Kamal with automatic secrets management. + +## Overview + +Kamal orchestrates Docker deployments to your servers: +- **Zero-downtime deploys** with rolling restarts +- **Automatic SSL** via Let's Encrypt and Traefik proxy +- **Health checks** and automatic rollback +- **Multi-service support** (Python API, TypeScript web/API, databases) +- **Secrets from 1Password** automatically injected + +All managed via: `make kamal ARGS=" "` + +## Prerequisites + +Before deploying, ensure: + +1. **[Infrastructure Deployed](../setup/INFRASTRUCTURE.md)** - Server and DNS configured +2. **[Container Registry](../setup/INFRASTRUCTURE.md#container-registry-setup)** - Docker repositories created +3. **[1Password Configured](../setup/ONE_PASSWORD.md)** - Stage-specific secrets in vault + +### Install Kamal + +```bash +gem install kamal +``` + +Verify: +```bash +kamal version +``` + +## Service Configuration + +Services are defined in `config/deploy/`: + +``` +config/deploy/ +├── py.yml # Python FastAPI + PostgreSQL +├── ts-web.yml # TypeScript Vite web app +└── static.yml # Static file service (optional) +``` + +Each service config specifies: +- Docker image name and registry +- Server hostnames (from Terraform outputs) +- Environment variables (from 1Password) +- Health check endpoints +- Proxy configuration (Traefik) +- Accessories (databases, Redis, etc.) + +## Deployment Workflow + +### First-Time Server Bootstrap + +**CRITICAL**: Before deploying ANY service to a fresh server, you MUST bootstrap the Kamal infrastructure. + +#### Step 1: Bootstrap Server + +The first time you deploy to a server, run setup to install Kamal infrastructure: + +```bash +# Bootstrap server with Kamal infrastructure +make kamal ARGS="py production setup" +``` + +This installs: +- **Traefik reverse proxy** - Handles SSL and routing +- **Docker networks** - For container communication +- **Required directories** - For logs, caches, volumes + +You only need to run `setup` **once per server**, not once per service. 
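+
+Before moving on, you can optionally sanity-check the bootstrap. The snippet below is only a sketch: it assumes the proxy container's name contains "proxy" and that the Docker network created by setup is named `kamal`, both of which can vary by Kamal version.
+
+```bash
+# SSH to the production server (bin/ssh uses the Terraform-managed key)
+bin/ssh production
+
+# On the server: the reverse proxy should be running and bound to 80/443
+docker ps --filter "name=proxy"
+
+# Network created for container communication (name assumed)
+docker network ls | grep kamal
+```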
+ +#### Step 2: Boot Accessories (If Applicable) + +If your service has accessories (databases, Redis, etc.), boot them BEFORE deploying the app: + +```bash +# For Python app with PostgreSQL +make kamal ARGS="py production accessory boot postgres" +``` + +Check which accessories a service has by looking at its config file: + +```bash +# Check py.yml for accessories +grep -A 10 "accessories:" config/deploy/py.yml +``` + +#### Step 3: Deploy the Application + +Now you can deploy the actual application: + +```bash +# Deploy Python API +make kamal ARGS="py production deploy" + +# Deploy TypeScript web app +make kamal ARGS="ts-web production deploy" +``` + +#### Complete First-Time Workflow + +For a fresh server with multiple services: + +```bash +# 1. Bootstrap server (only needed once) +make kamal ARGS="py production setup" + +# 2. Boot database for Python app +make kamal ARGS="py production accessory boot postgres" + +# 3. Deploy Python app +make kamal ARGS="py production deploy" + +# 4. Deploy web app (no setup needed - server already bootstrapped) +make kamal ARGS="ts-web production deploy" +``` + +**Subsequent deployments** only need the deploy command: + +```bash +make kamal ARGS="py production deploy" +make kamal ARGS="ts-web production deploy" +``` + +### Build and Deploy + +```bash +# Deploy Python API +make kamal ARGS="py production deploy" + +# Deploy TypeScript web app +make kamal ARGS="ts-web production deploy" +``` + +The deploy process: +1. Builds Docker image locally +2. Pushes to container registry +3. Pulls image on server +4. Runs health checks +5. Switches traffic to new version +6. Removes old containers + +### Viewing Logs + +```bash +# View Python API logs +make kamal ARGS="py production logs" + +# View web app logs +make kamal ARGS="ts-web production logs" + +# Follow logs (live tail) +make kamal ARGS="py production logs --follow" + +# View specific number of lines +make kamal ARGS="py production logs --lines 100" +``` + +### Managing Services + +```bash +# Stop service +make kamal ARGS="py production stop" + +# Start service +make kamal ARGS="py production start" + +# Restart service +make kamal ARGS="py production restart" + +# Rollback to previous version +make kamal ARGS="py production rollback" +``` + +## Service Details + +### Python API Service + +**Config**: `config/deploy/py.yml` + +**Includes:** +- FastAPI application (port 8000) +- PostgreSQL database (accessory) +- Google OAuth environment variables +- Health check on `/health` +- SSL via Traefik + +**Environment Variables:** +Automatically loaded from 1Password vault `-production`: +- `GOOGLE_O_AUTH_CLIENT_ID` +- `GOOGLE_O_AUTH_CLIENT_SECRET` +- `POSTGRES_URL` (generated by database accessory) + +**Database Management:** + +```bash +# Start database only +make kamal ARGS="py production accessory boot postgres" + +# Stop database +make kamal ARGS="py production accessory stop postgres" + +# View database logs +make kamal ARGS="py production accessory logs postgres" + +# Execute command in database +make kamal ARGS="py production accessory exec postgres psql -U " +``` + +**Deploying:** +```bash +# First time (includes database setup) +make kamal ARGS="py production setup" +make kamal ARGS="py production accessory boot postgres" +make kamal ARGS="py production deploy" + +# Subsequent deployments +make kamal ARGS="py production deploy" +``` + +The deploy automatically runs Alembic migrations via the startup script. 
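+
+For reference, the startup script baked into the image by `py/Dockerfile` is roughly equivalent to the following, which is why a plain `deploy` picks up new migrations:
+
+```bash
+#!/bin/bash
+# /app/start.sh (generated during the Docker build)
+/app/bin/db.sh migrate   # apply pending Alembic migrations
+/app/bin/app/run.sh      # start the application server
+```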
+ +### TypeScript Web Service + +**Config**: `config/deploy/ts-web.yml` + +**Includes:** +- Vite React application (port 5173) +- Static asset serving +- Health check on `/` +- SSL via Traefik + +**Environment Variables:** +Build-time environment variables (if needed): +- `VITE_API_URL` - Backend API URL + +**Deploying:** +```bash +# First time +make kamal ARGS="ts-web production setup" +make kamal ARGS="ts-web production deploy" + +# Subsequent deployments +make kamal ARGS="ts-web production deploy" +``` + +## Common Operations + +### Executing Commands + +Run commands inside containers: + +```bash +# Python: Run database migration manually +make kamal ARGS="py production app exec ./bin/db.sh migrate" + +# Python: Open Python shell +make kamal ARGS="py production app exec uv run python" + +# Python: Check environment +make kamal ARGS="py production app exec env" + +# Database: Connect to PostgreSQL +make kamal ARGS="py production accessory exec postgres psql -U " +``` + +### Viewing Status + +```bash +# Show running containers +make kamal ARGS="py production ps" + +# Show all details (containers, health, etc.) +make kamal ARGS="py production details" + +# Show audit log (recent deployments) +make kamal ARGS="py production audit" +``` + +### Image Management + +```bash +# List images on server +make kamal ARGS="py production images" + +# Remove old images (free up space) +make kamal ARGS="py production prune all" +``` + +### Config Validation + +```bash +# Validate config file before deploying +kamal config validate -c config/deploy/py.yml + +# Show rendered config (with secrets redacted) +kamal config show -c config/deploy/py.yml +``` + +## Troubleshooting + +### "Traefik container not found" or "No proxy running" + +You forgot to bootstrap the server. Run setup first: + +```bash +make kamal ARGS="py production setup" +``` + +This must be run once before any deployments on a fresh server. + +### "Database connection refused" (Python app) + +You forgot to boot the PostgreSQL accessory. Boot it before deploying: + +```bash +# Boot the database +make kamal ARGS="py production accessory boot postgres" + +# Then deploy +make kamal ARGS="py production deploy" +``` + +### "Container won't start" + +Check logs: +```bash +make kamal ARGS="py production logs" +``` + +Common issues: +- **Missing environment variable**: Check 1Password vault has required secrets +- **Health check failing**: Check health endpoint returns 200 OK +- **Port conflict**: Ensure no other containers using the port +- **Accessory not running**: Boot accessories first + +### "Health check failed" + +The deployment will fail if health checks don't pass. To debug: + +```bash +# SSH to server and check container +bin/ssh production +docker ps -a # See if container is running +docker logs # View container logs + +# Check health endpoint manually +curl http://localhost:8000/health +``` + +### "Cannot pull image" + +Authentication issues with container registry: + +```bash +# Check Docker Hub credentials in 1Password +bin/vault read DOCKER_HUB_USERNAME +bin/vault read DOCKER_HUB_PASSWORD + +# Re-authenticate on server +bin/ssh production +docker login -u USERNAME -p PASSWORD +``` + +### "SSH connection failed" + +Check SSH key and server IP: + +```bash +# Verify infrastructure outputs +make iac production output server_ip +make iac production output -raw ssh_private_key + +# Test SSH manually +bin/ssh production +``` + +### "SSL certificate not working" + +Traefik handles SSL via Let's Encrypt. 
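+
+To see which certificate is actually being served before digging into logs, you can inspect it directly with `openssl` (substitute your own domain):
+
+```bash
+# Show issuer and validity window of the live certificate
+openssl s_client -connect yourdomain.com:443 -servername yourdomain.com </dev/null 2>/dev/null \
+  | openssl x509 -noout -issuer -subject -dates
+```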
Common issues: + +```bash +# Check Traefik logs +bin/ssh production +docker logs + +# Verify DNS points to server +dig yourdomain.com # Should return SERVER_IP + +# Check Traefik dashboard (if enabled) +curl http://localhost:8080/dashboard/ +``` + +**Note**: Let's Encrypt requires: +- Domain must resolve to server IP +- Port 80/443 must be accessible +- Valid email address in Kamal config + +## Best Practices + +### 1. Test Locally First + +Before deploying: +```bash +# Build and test locally +make build +make test +make check +``` + +### 2. Use Staging + +Always test on staging before production: +```bash +# Deploy to staging +make kamal ARGS="py staging deploy" + +# Test thoroughly + +# Deploy to production +make kamal ARGS="py production deploy" +``` + +### 3. Monitor Deployments + +Watch logs during deployment: +```bash +# In one terminal +make kamal ARGS="py production deploy" + +# In another terminal +make kamal ARGS="py production logs --follow" +``` + +### 4. Keep Images Small + +Optimize Dockerfiles: +- Use multi-stage builds +- Minimize layers +- Remove build dependencies in final image + +### 5. Regular Cleanup + +Remove old images to save disk space: +```bash +# Weekly or after major deployments +make kamal ARGS="py production prune all" +make kamal ARGS="ts-web production prune all" +``` + +### 6. Backup Databases + +Before major updates: +```bash +# Backup PostgreSQL +bin/ssh production +docker exec pg_dump -U > backup.sql +``` + +## Next Steps + +After successful deployment: + +1. **Verify services**: Visit your domain (https://yourdomain.com) +2. **Check logs**: `make kamal ARGS=" production logs"` +3. **Monitor resources**: `bin/ssh production && docker stats` +4. **Set up monitoring**: Consider Uptime Robot, Sentry, etc. +5. **Configure backups**: Set up automated database backups + +For local development workflows, see [Local Development Guide](../development/LOCAL.md). diff --git a/docs/development/LOCAL.md b/docs/development/LOCAL.md new file mode 100644 index 0000000..45f4e0f --- /dev/null +++ b/docs/development/LOCAL.md @@ -0,0 +1,485 @@ +# Local Development Guide + +This guide explains how to set up and use the local development environment for the generic template. 
+ +## Overview + +The template supports local development with: +- **Multi-project tmux sessions** - All services in one terminal +- **Hot-reload** - Python and TypeScript automatically reload on changes +- **Local databases** - PostgreSQL in Docker containers +- **1Password integration** - Secrets loaded from vaults +- **Branding sync** - Shared CSS and assets across projects + +## Quick Start + +```bash +# Install all dependencies +make install + +# Build styles +make styles + +# Run all dev servers in tmux +make dev +``` + +This starts: +- Python FastAPI on `http://localhost:8000` +- TypeScript Vite web app on `http://localhost:5173` +- TypeScript Express API on `http://localhost:3001` +- PostgreSQL on `localhost:5432` (Python only) + +## Prerequisites + +### Required Tools + +**All Projects:** +- **1Password CLI** (`op`) - For secrets management +- **Make** - Command runner + +**Python:** +- **Python 3.12** +- **uv** - Python package manager +- **Docker** - For PostgreSQL container + +**TypeScript:** +- **Node.js 20.x** +- **pnpm** - Installed automatically by project + +### Install Prerequisites + +```bash +# macOS +brew install --cask 1password-cli +brew install python@3.12 +brew install --cask docker + +# Install uv (Python package manager) +curl -LsSf https://astral.sh/uv/install.sh | sh + +# Node.js (via nvm recommended) +nvm install 20 +nvm use 20 +``` + +## Development Workflows + +### Multi-Project Development (Recommended) + +Run all projects simultaneously in a tmux session: + +```bash +# Start all dev servers +make dev +``` + +This creates a tmux session with: +- **Pane 1**: Python development server +- **Pane 2**: TypeScript development servers (web + api) + +**Tmux Navigation:** +- `Ctrl-b %` - Split pane vertically +- `Ctrl-b "` - Split pane horizontally +- `Ctrl-b arrow` - Navigate between panes +- `Ctrl-b d` - Detach from session +- `tmux attach -t ` - Reattach to session + +**Stopping:** +```bash +# Kill the tmux session +make dev ARGS="--kill" + +# Or from inside tmux +Ctrl-b :kill-session +``` + +### Single Project Development + +Run projects individually: + +```bash +# Python only +make dev-py + +# TypeScript only +make dev-ts +``` + +Or use project-specific commands: + +```bash +# Python development +cd py/ +make dev + +# TypeScript development +cd ts/ +make dev +``` + +## TypeScript Development + +### Initial Setup + +```bash +cd ts/ + +# Install dependencies +make install + +# Setup branding asset symlinks +make styles + +# Start development servers +make dev +``` + +### Development Servers + +```bash +# All apps (web + api) +make dev +``` + +This starts: +- Vite web app on `http://localhost:5173` +- Express API on `http://localhost:3001` + +Both with: +- Hot module replacement +- TypeScript compilation +- Auto-restart on changes + +### Code Quality + +```bash +# Format code +make fmt + +# Check formatting +make fmt-check + +# Type checking +make types + +# Run tests +make test + +# Run all checks +make check +``` + +### Styles and Assets + +```bash +# Create asset symlinks +make styles + +# Clean symlinks +make clean +``` + +This creates symlinks in `ts/apps/web/public/` pointing to `branding/assets/`: +- `favicon.ico` → `branding/assets/favicon.ico` +- `icon.svg` → `branding/assets/icon.svg` +- etc. + +**Note**: Symlinks are used in development. For Docker builds, assets are copied instead. 
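+
+The linking itself is handled by `ts/bin/styles.sh`; as a rough sketch (assuming the script simply links each branding asset into the web app's public directory), the effect is:
+
+```bash
+# Sketch of what `make styles` sets up for the web app (actual script may differ)
+cd ts/apps/web/public
+for asset in favicon.ico favicon.png icon.png icon.svg; do
+  ln -sf "../../../../branding/assets/${asset}" "${asset}"
+done
+```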
+ +### Project Structure + +``` +ts/ +├── apps/ +│ ├── web/ # Vite React app +│ │ ├── src/ # Source code +│ │ ├── public/ # Static files (symlinks to branding) +│ │ └── package.json +│ └── api/ # Express API +│ ├── src/ # Source code +│ ├── tests/ # Test files +│ └── package.json +├── packages/ +│ ├── typescript-config/ # Shared TypeScript configs +│ └── http-api/ # Shared HTTP API types +├── bin/ +│ └── styles.sh # Branding asset setup +├── package.json # Root package.json +├── pnpm-workspace.yaml # pnpm workspace config +└── turbo.json # Turbo monorepo config +``` + +### Common Tasks + +**Add a new page (web app):** +1. Create component in `apps/web/src/pages/YourPage.tsx` +2. Add route to your router +3. Test: `http://localhost:5173/your-page` + +**Add an API endpoint:** +1. Create route in `apps/api/src/routes/` +2. Register in `apps/api/src/index.ts` +3. Test: `curl http://localhost:3001/your-endpoint` + +**Add a dependency:** +```bash +# To web app +cd apps/web +pnpm add package-name + +# To API +cd apps/api +pnpm add package-name + +# To root (dev dependency) +pnpm add -D -w package-name +``` + +## Python Development + +### Initial Setup + +```bash +cd py/ + +# Install dependencies +make install + +# Build styles +make styles + +# Start local PostgreSQL +make db up + +# Run migrations +make db migrate +``` + +### Development Server + +```bash +# With 1Password vault (recommended) +make dev + +# Without vault (requires manual env vars) +make run +``` + +Server runs on `http://localhost:8000` with hot-reload. + +### Database Management + +**Starting Database:** +```bash +# Start PostgreSQL container +make db up + +# Check status +make db status + +# View connection URL +make db endpoint +``` + +**Migrations:** +```bash +# Run all pending migrations +make db migrate + +# Create new migration (auto-detect changes) +make db prepare "Add user table" + +# Create manual migration +MANUAL=1 make db prepare "Custom data migration" +``` + +**Database Access:** +```bash +# Connect via psql +make db connect + +# Run SQL query +make db connect +> SELECT * FROM users; + +# View migration history +uv run alembic history +``` + +**Stopping Database:** +```bash +# Stop container (keeps data) +docker stop + +# Remove container and volumes (deletes data) +make db down +``` + +## Branding and Styles + +Shared branding assets are in `branding/`: + +``` +branding/ +├── assets/ # Icons, favicons +│ ├── favicon.ico +│ ├── icon.svg +│ ├── icon.png +│ └── favicon.png +├── styles/ # CSS source files +│ └── global.css # Tailwind base + custom CSS +└── tailwind.config.js # Shared Tailwind config +``` + +### Building Styles + +```bash +# From project root +make styles # Build styles for all projects +make styles-py # Build Python styles only +make styles-ts # Build TypeScript styles only + +# Or from individual projects +cd py/ && make styles +cd ts/ && make styles +``` + +**Python**: Copies assets and compiles Tailwind CSS to `py/static/css/main.css` + +**TypeScript**: Creates symlinks from `ts/apps/web/public/` to `branding/assets/` + +## Environment Variables + +### Development Environment + +For local development, secrets can be loaded from 1Password or set manually. 
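+
+If you go the manual route, one option is to export values straight from the 1Password CLI. The vault and item names below follow the conventions from the setup walkthrough and are assumptions; adjust them to your own vaults:
+
+```bash
+# Pull OAuth credentials from a development vault (vault/item names assumed)
+export GOOGLE_O_AUTH_CLIENT_ID="$(op read 'op://my-project-development/GOOGLE_O_AUTH_CLIENT/username')"
+export GOOGLE_O_AUTH_CLIENT_SECRET="$(op read 'op://my-project-development/GOOGLE_O_AUTH_CLIENT/credential')"
+```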
+ +**Python** (via `bin/app/dev.sh`): +```bash +# Example environment variables +GOOGLE_O_AUTH_CLIENT_ID +GOOGLE_O_AUTH_CLIENT_SECRET +POSTGRES_URL # Auto-set by db commands +``` + +**TypeScript**: +```bash +# Web app can use .env.local or environment variables +VITE_API_URL=http://localhost:3001 + +# API can use environment variables for config +PORT=3001 +``` + +## Testing + +### Python Tests + +```bash +cd py/ + +# Run all tests +make test + +# Run specific test file +uv run pytest tests/test_auth.py + +# Run with coverage +uv run pytest --cov=src tests/ + +# Run with verbose output +uv run pytest -v tests/ +``` + +### TypeScript Tests + +```bash +cd ts/ + +# Run all tests +make test + +# Run specific test +pnpm test your-test.spec.ts + +# Run with watch mode +cd apps/api +pnpm test --watch +``` + +## Troubleshooting + +### "Module not found" (Python) + +```bash +cd py/ +make install # Reinstall dependencies +``` + +### "Command not found: pnpm" (TypeScript) + +```bash +cd ts/ +make install # Installs pnpm and dependencies +``` + +### "Database connection refused" (Python) + +```bash +# Check database is running +make db status + +# Start database +make db up + +# Check connection URL +make db endpoint +``` + +### "Port already in use" + +```bash +# Python (8000) +lsof -i :8000 +kill -9 PID + +# TypeScript Web (5173) +lsof -i :5173 +kill -9 PID + +# TypeScript API (3001) +lsof -i :3001 +kill -9 PID + +# PostgreSQL (5432) +lsof -i :5432 +make db down +``` + +### "Styles not loading" + +```bash +# Rebuild styles +make styles + +# Python: Check static/css/main.css exists +ls py/static/css/ + +# TypeScript: Check symlinks exist +ls -la ts/apps/web/public/ +``` + +## Next Steps + +After setting up local development: + +1. **Make changes** to your application +2. **Test locally** with `make dev` +3. **Run checks** with `make check` +4. **Commit changes** to git +5. **Deploy** to staging/production (see deployment guides) + +For deployment workflows, see: +- [Infrastructure Setup](../setup/INFRASTRUCTURE.md) +- [Deployment Guide](../deployment/KAMAL.md) diff --git a/docs/setup/WALKTHROUGH.md b/docs/setup/WALKTHROUGH.md new file mode 100644 index 0000000..55f392e --- /dev/null +++ b/docs/setup/WALKTHROUGH.md @@ -0,0 +1,505 @@ +# Complete Setup Walkthrough + +This guide walks you through the complete setup process for deploying the generic template from scratch. + +## Overview + +The generic template is an **extensible Docker-based deployment framework** for different types of applications. It provides: + +- **Multiple application stacks** - TypeScript (Vite + Express) and Python (FastAPI) +- **Container-first deployment** - Portable, reproducible builds with Docker +- **Infrastructure as Code** - Terraform for server provisioning +- **Automated secrets management** - 1Password integration +- **Zero-downtime deployments** - Kamal orchestration + +This walkthrough will guide you through: +1. Prerequisites and account setup +2. 1Password vault configuration +3. Terraform Cloud setup +4. Infrastructure deployment +5. Application deployment + +**Time estimate**: 1-2 hours for first-time setup + +## Prerequisites + +### Required Accounts + +1. **1Password account** with CLI installed (`op`) + - Individual or Teams account + - CLI tool: https://1password.com/downloads/command-line/ + +2. **Terraform Cloud account** (free tier) + - Sign up at: https://app.terraform.io/ + +3. 
**Digital Ocean account** with API token + - Sign up at: https://www.digitalocean.com/ + - Create API token in Settings → API + +4. **Cloudflare account** managing your DNS zone + - Sign up at: https://www.cloudflare.com/ + - Add your domain to Cloudflare + +5. **Docker Hub account** (or GitHub Container Registry) + - Sign up at: https://hub.docker.com/ + - Create access token in Settings → Security + +### Required Tools + +Install these tools on your development machine: + +```bash +# macOS +brew install --cask 1password-cli +brew install terraform +gem install kamal + +# Verify installations +op --version +terraform --version +kamal version +``` + +### Domain Setup + +You need a domain name managed by Cloudflare: + +1. Register a domain (NameCheap, Google Domains, etc.) +2. Point nameservers to Cloudflare +3. Verify domain is active in Cloudflare dashboard + +## Step 1: Configure Project Settings + +### 1.1 Edit `.env.project` + +This is the single source of truth for your project configuration: + +```bash +# Edit the file +vim .env.project +``` + +Set these values: + +```bash +# Must be globally unique (becomes your TFC org name) +PROJECT_NAME=yourname-generic + +# Your Cloudflare domain +DNS_ROOT_ZONE=yourdomain.com + +# Services and subdomains (format: service:subdomain) +# Empty subdomain means root domain +SERVICES="py:app,ts-web:" + +# 1Password vault for cloud credentials +CLOUD_VAULT=cloud-providers +``` + +**Important**: +- `PROJECT_NAME` must be globally unique (TFC org name = `${PROJECT_NAME}-org`) +- For `SERVICES`, format is `service:subdomain` + - `py:app` → deploys Python API to `app.yourdomain.com` + - `ts-web:` → deploys web app to `yourdomain.com` (root) + +### 1.2 Verify Configuration + +```bash +# Source the config +source .env.project + +# Verify variables are set +echo $PROJECT_NAME +echo $DNS_ROOT_ZONE +echo $SERVICES +``` + +## Step 2: 1Password Setup + +### 2.1 Create Cloud Providers Vault + +Create a vault for cloud provider credentials: + +```bash +# Create vault +op vault create "cloud-providers" + +# Or use existing vault and update .env.project +``` + +### 2.2 Add Cloud Provider Credentials + +Add credentials to the cloud-providers vault: + +**Terraform Cloud API Token:** +```bash +op item create \ + --category=login \ + --title=TERRAFORM_CLOUD_API_TOKEN \ + --vault=cloud-providers \ + credential= +``` + +Get token from: https://app.terraform.io/app/settings/tokens + +**Docker Hub:** +```bash +op item create \ + --category=login \ + --title=DOCKER_HUB_LOGIN \ + --vault=cloud-providers \ + username= \ + credential= +``` + +**Digital Ocean:** +```bash +op item create \ + --category=login \ + --title=DO_API_TOKEN \ + --vault=cloud-providers \ + credential= +``` + +**Cloudflare:** +```bash +op item create \ + --category=login \ + --title=CLOUDFLARE_DNS_API_TOKEN \ + --vault=cloud-providers \ + credential= +``` + +### 2.3 Create Stage-Specific Vaults + +Create vaults for each deployment stage: + +```bash +# Production vault +op vault create "-production" + +# Staging vault (optional) +op vault create "-staging" + +# Development vault (for local dev) +op vault create "-development" +``` + +### 2.4 Add Application Secrets + +Add application-specific secrets to stage vaults: + +**Google OAuth (for Python app):** +```bash +op item create \ + --category=login \ + --title=GOOGLE_O_AUTH_CLIENT \ + --vault=-production \ + username= \ + credential= +``` + +Get credentials from: https://console.cloud.google.com/apis/credentials + +**Other secrets as needed:** +- API keys 
+- Third-party service tokens +- Application-specific config + +### 2.5 Verify 1Password Setup + +```bash +# Test reading credentials +bin/vault read DOCKER_HUB_USERNAME +bin/vault read DO_API_TOKEN + +# Should print values without errors +``` + +See [ONE_PASSWORD.md](./ONE_PASSWORD.md) for detailed vault structure. + +## Step 3: Terraform Cloud Setup + +### 3.1 Create Organization and Workspaces + +```bash +# Create TFC org and workspaces +make tfc up +``` + +This creates: +- TFC organization: `-org` +- Workspaces: + - `container-registry` - Docker Hub repos + - `production` - Production infrastructure + - `staging` - Staging infrastructure (optional) + +### 3.2 Verify TFC Setup + +```bash +# Check status +make tfc status + +# Should show: +# ✓ Organization exists +# ✓ Workspaces created +``` + +See [TERRAFORM_CLOUD.md](./TERRAFORM_CLOUD.md) for details. + +## Step 4: Infrastructure Deployment + +### 4.1 Deploy Container Registry + +First, create Docker Hub repositories: + +```bash +# Initialize Terraform +make iac container-registry init + +# Review plan +make iac container-registry plan + +# Deploy +make iac container-registry apply +``` + +This creates Docker Hub repos for each service. + +### 4.2 Deploy Production Infrastructure + +```bash +# Initialize Terraform +make iac production init + +# Review plan +make iac production plan + +# Deploy infrastructure +make iac production apply +``` + +This creates: +- Digital Ocean droplet (server) +- SSH keys for access +- Cloudflare DNS records +- Firewall rules + +**Note**: First apply may take 5-10 minutes. + +### 4.3 Verify Infrastructure + +```bash +# Get infrastructure outputs +make iac production output + +# Should show: +# server_ip = "xxx.xxx.xxx.xxx" +# ssh_connect_command = "ssh ..." +# dns_records = { ... } + +# Test SSH access +bin/ssh production +# Should connect to server +``` + +See [INFRASTRUCTURE.md](./INFRASTRUCTURE.md) for details. + +## Step 5: Application Deployment + +### 5.1 Bootstrap Server + +**First time only** - setup Kamal infrastructure on server: + +```bash +# Bootstrap server (installs Traefik, Docker networks, etc.) +make kamal ARGS="py production setup" +``` + +### 5.2 Deploy Python API + +If using the Python stack: + +```bash +# Boot PostgreSQL database +make kamal ARGS="py production accessory boot postgres" + +# Deploy Python app +make kamal ARGS="py production deploy" + +# Verify +curl https://app.yourdomain.com/health +# Should return: {"status": "ok"} +``` + +### 5.3 Deploy TypeScript Web App + +If using the TypeScript stack: + +```bash +# Deploy web app +make kamal ARGS="ts-web production deploy" + +# Verify +curl https://yourdomain.com +# Should return HTML +``` + +### 5.4 Verify Deployments + +```bash +# Check running containers +make kamal ARGS="py production ps" + +# View logs +make kamal ARGS="py production logs" +make kamal ARGS="ts-web production logs" + +# Check SSL certificates +curl -I https://yourdomain.com +# Should show: HTTP/2 200 +``` + +See [KAMAL.md](../deployment/KAMAL.md) for deployment details. 
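+
+If you prefer to run the whole first rollout as one script, the steps above collapse into something like this. It is a sketch that assumes the default `py` and `ts-web` services; adjust it to whatever you set in `SERVICES`:
+
+```bash
+#!/usr/bin/env bash
+set -euo pipefail
+
+# One-time bootstrap, then first deploys (service names assumed)
+make kamal ARGS="py production setup"
+make kamal ARGS="py production accessory boot postgres"
+make kamal ARGS="py production deploy"
+make kamal ARGS="ts-web production deploy"
+```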
+ +## Step 6: Verification and Testing + +### 6.1 Test Services + +Visit your deployed services: + +- **Web app**: https://yourdomain.com +- **Python API**: https://app.yourdomain.com +- **API health**: https://app.yourdomain.com/health + +### 6.2 Monitor Resources + +```bash +# SSH to server +bin/ssh production + +# Check resource usage +docker stats + +# Should show all containers running with reasonable CPU/RAM +``` + +### 6.3 Test OAuth (if enabled) + +For Python app with Google OAuth: + +1. Visit: https://app.yourdomain.com/auth/google +2. Should redirect to Google login +3. After auth, redirects back to app + +## Common Issues + +### "TFC organization already exists" + +Someone else is using that project name. Change `PROJECT_NAME` in `.env.project`. + +### "DNS records not propagating" + +Wait 5-10 minutes for Cloudflare DNS propagation. Check with: +```bash +dig yourdomain.com +``` + +### "Cannot connect to server via SSH" + +Check firewall allows your IP: +```bash +# Get your IP +curl ifconfig.me + +# Add to firewall in Digital Ocean dashboard +``` + +### "Container won't start" + +Check logs: +```bash +make kamal ARGS="py production logs" +``` + +Common issues: +- Missing environment variables in 1Password +- Database not booted (for Python) +- Health check endpoint not responding + +### "SSL certificate not issued" + +Ensure: +- DNS points to server (check with `dig`) +- Port 80/443 open in firewall +- Valid email in Kamal config + +## Next Steps + +After successful deployment: + +1. **Set up monitoring** + - Uptime Robot for health checks + - Sentry for error tracking + - Log aggregation (if needed) + +2. **Configure backups** + - Database backups (for Python stack) + - Config backups (1Password handles this) + +3. **Set up CI/CD** (optional) + - GitHub Actions for automated deploys + - Run tests before deployment + +4. **Scale as needed** + - Upgrade droplet size in Terraform + - Add more servers for load balancing + +5. **Customize your application** + - Modify Python/TypeScript code + - Add new features + - Deploy updates with `make kamal ARGS=" production deploy"` + +## Development Workflow + +For local development: + +```bash +# Install dependencies +make install + +# Build styles +make styles + +# Run all dev servers +make dev +``` + +See [LOCAL.md](../development/LOCAL.md) for development guide. 
+ +## Resources + +- [1Password Setup](./ONE_PASSWORD.md) +- [Terraform Cloud Setup](./TERRAFORM_CLOUD.md) +- [Infrastructure Guide](./INFRASTRUCTURE.md) +- [Deployment Guide](../deployment/KAMAL.md) +- [Development Guide](../development/LOCAL.md) +- [DevOps Philosophy](../dev-ops/index.md) + +## Support + +For issues: +- Check troubleshooting sections in each guide +- Review Kamal logs: `make kamal ARGS=" production logs"` +- Check infrastructure: `make iac production output` +- Verify 1Password: `bin/vault read ` + +## Cost Estimate + +Running this stack: +- **Digital Ocean Droplet** (s-1vcpu-1gb): $6/month +- **Cloudflare DNS**: Free +- **Terraform Cloud**: Free (up to 500 resources) +- **1Password**: Varies by plan + +**Total minimum**: ~$6-15/month diff --git a/hcp.yaml b/hcp.yaml deleted file mode 100644 index a660242..0000000 --- a/hcp.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# HCP Vault configuration -vault: - # Project ID from HCP - # TODO: Replace with your actual project ID - project_id: a9087abb-c3b8-405c-b77f-f45e9bed9d09 \ No newline at end of file diff --git a/iac/Makefile b/iac/Makefile deleted file mode 100644 index f9a5d05..0000000 --- a/iac/Makefile +++ /dev/null @@ -1,106 +0,0 @@ -.PHONY: help -help: ## Show this help message - @echo 'Usage: make [target]' - @echo '' - @echo 'Available targets:' - @awk 'BEGIN {FS = ":.*##"; printf "\n"} /^[a-zA-Z_-]+:.*?##/ { printf " %-20s %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -ENV ?= production - -.PHONY: init -init: ## Initialize Terraform (use ENV=production|registry) - @echo "Initializing Terraform for $(ENV) environment..." - @cd envs/$(ENV) && terraform init - -.PHONY: init-all -init-all: ## Initialize all Terraform environments - @for env in production registry; do \ - echo "Initializing $$env environment..."; \ - cd envs/$$env && terraform init && cd ../..; \ - done - -.PHONY: plan -plan: ## Plan Terraform changes (use ENV=production|registry) - @echo "Planning Terraform changes for $(ENV) environment..." - @cd envs/$(ENV) && terraform plan - -.PHONY: apply -apply: ## Apply Terraform changes (use ENV=production|registry) - @echo "Applying Terraform changes for $(ENV) environment..." - @cd envs/$(ENV) && terraform apply - -.PHONY: destroy -destroy: ## Destroy Terraform resources (use ENV=production|registry) - @echo "WARNING: This will destroy all resources in $(ENV) environment!" - @echo "Press Ctrl+C to cancel, or Enter to continue..." - @read confirm - @cd envs/$(ENV) && terraform destroy - -.PHONY: fmt -fmt: ## Format Terraform files - @echo "Formatting Terraform files..." - @terraform fmt -recursive . - -.PHONY: fmt-check -fmt-check: ## Check Terraform formatting - @echo "Checking Terraform formatting..." - @terraform fmt -recursive -check . - -.PHONY: validate -validate: ## Validate Terraform configuration - @echo "Validating Terraform configuration for $(ENV) environment..." - @cd envs/$(ENV) && terraform validate - -.PHONY: validate-all -validate-all: ## Validate all Terraform environments - @for env in production registry; do \ - echo "Validating $$env environment..."; \ - cd envs/$$env && terraform validate && cd ../..; \ - done - -.PHONY: output -output: ## Show Terraform outputs (use ENV=production|registry) - @cd envs/$(ENV) && terraform output - -.PHONY: refresh -refresh: ## Refresh Terraform state (use ENV=production|registry) - @echo "Refreshing Terraform state for $(ENV) environment..." 
- @cd envs/$(ENV) && terraform refresh - -.PHONY: state-list -state-list: ## List Terraform state resources (use ENV=production|registry) - @cd envs/$(ENV) && terraform state list - -.PHONY: console -console: ## Open Terraform console (use ENV=production|registry) - @cd envs/$(ENV) && terraform console - -.PHONY: graph -graph: ## Generate Terraform dependency graph (use ENV=production|registry) - @cd envs/$(ENV) && terraform graph | dot -Tpng > terraform-graph-$(ENV).png - @echo "Graph saved to envs/$(ENV)/terraform-graph-$(ENV).png" - -.PHONY: clean -clean: ## Clean Terraform files - @echo "Cleaning Terraform files..." - @find . -type f -name "*.tfplan" -delete - @find . -type f -name "*.tfstate.backup" -delete - @find . -type f -name "terraform-graph-*.png" -delete - @find . -type d -name ".terraform" -prune -exec rm -rf {} + - -.PHONY: docs -docs: ## Generate Terraform documentation - @echo "Generating Terraform documentation..." - @terraform-docs markdown table --output-file README.md --output-mode inject ./modules/aws - @terraform-docs markdown table --output-file README.md --output-mode inject ./modules/digitalocean - @for env in production registry; do \ - terraform-docs markdown table --output-file README.md --output-mode inject ./envs/$$env; \ - done - -.PHONY: cost -cost: ## Estimate costs with Infracost (use ENV=production|registry) - @infracost breakdown --path envs/$(ENV) - -.PHONY: security -security: ## Run security scan with tfsec - @tfsec . \ No newline at end of file diff --git a/iac/envs/common/main.tf b/iac/envs/common/main.tf deleted file mode 100644 index b9709f6..0000000 --- a/iac/envs/common/main.tf +++ /dev/null @@ -1,49 +0,0 @@ -# DigitalOcean Infrastructure Module - -# random prefix for the project -resource "random_string" "project_prefix" { - length = 4 - special = false -} - -# Project -module "digitalocean_project" { - source = "../../modules/digitalocean/project" - - name = "${local.project_name}-${var.environment}-${random_string.project_prefix.result}-project" - description = "Project for ${local.project_name} ${var.environment}" - environment = var.environment - resources = [module.digitalocean_droplet.urn] -} - - -# SSH key which will get access to the droplet -module "digitalocean_ssh_key" { - source = "../../modules/digitalocean/ssh_key" - - name = "${local.project_name}-${var.environment}-${random_string.project_prefix.result}-ssh-key" -} - -# Droplet -module "digitalocean_droplet" { - source = "../../modules/digitalocean/droplet" - - name = "${local.project_name}-${var.environment}-${random_string.project_prefix.result}-droplet" - - region = var.digitalocean.droplet.region - tags = ["${local.project_name}-${var.environment}"] - - # SSH configuration - ssh_keys = [module.digitalocean_ssh_key.id] -} - -# Cloudflare DNS -module "cloudflare_dns" { - source = "../../modules/cloudflare/dns" - - zone_id = local.cloudflare_zone_id - droplet_ip = module.digitalocean_droplet.ipv4_address - domain_slugs = ["ts.${local.project_name}", "py.${local.project_name}", "${local.project_name}"] - ttl = var.cloudflare.ttl - proxied = var.cloudflare.proxied -} \ No newline at end of file diff --git a/iac/envs/common/outputs.tf b/iac/envs/common/outputs.tf deleted file mode 100644 index 45ea14c..0000000 --- a/iac/envs/common/outputs.tf +++ /dev/null @@ -1,57 +0,0 @@ -# Droplet outputs -output "digitalocean_droplet_ip" { - description = "IP address of the DigitalOcean droplet" - value = module.digitalocean_droplet.ipv4_address -} - -output "digitalocean_droplet_id" { - 
description = "ID of the DigitalOcean droplet" - value = module.digitalocean_droplet.id -} - -# SSH Key outputs -output "digitalocean_ssh_key_id" { - description = "ID of the SSH key" - value = module.digitalocean_ssh_key.id -} - -output "digitalocean_ssh_key_fingerprint" { - description = "Fingerprint of the SSH key" - value = module.digitalocean_ssh_key.fingerprint -} - -output "digitalocean_ssh_private_key" { - description = "The generated private SSH key (sensitive)" - value = module.digitalocean_ssh_key.private_key_openssh - sensitive = true -} - -output "digitalocean_ssh_public_key" { - description = "The public SSH key" - value = module.digitalocean_ssh_key.public_key_openssh -} - -# Cloudflare outputs -output "cloudflare_zone_id" { - description = "The Cloudflare Zone ID" - value = module.cloudflare_dns.zone_id -} - -output "cloudflare_dns_records" { - description = "All Cloudflare DNS records created" - value = { - root = { - id = module.cloudflare_dns.subdomain_records["ts.${local.project_name}"].id - hostname = module.cloudflare_dns.subdomain_records["ts.${local.project_name}"].hostname - } - py = { - id = module.cloudflare_dns.subdomain_records["py.${local.project_name}"].id - hostname = module.cloudflare_dns.subdomain_records["py.${local.project_name}"].hostname - } - project = { - id = module.cloudflare_dns.subdomain_records["${local.project_name}"].id - hostname = module.cloudflare_dns.subdomain_records["${local.project_name}"].hostname - } - subdomains = module.cloudflare_dns.subdomain_records - } -} \ No newline at end of file diff --git a/iac/envs/container-registry/main.tf b/iac/envs/container-registry/main.tf deleted file mode 100644 index faad3bd..0000000 --- a/iac/envs/container-registry/main.tf +++ /dev/null @@ -1,10 +0,0 @@ -module "registry" { - source = "../../modules/digitalocean/container-registry" - - # TODO: this naming should be abstracted to the module - name = "${lower(random_string.project_prefix.result)}${local.project_name}registry" - region = "nyc3" - subscription_tier = "basic" - repositories = var.repositories - lifecycle_policy = var.default_lifecycle_policy -} \ No newline at end of file diff --git a/iac/envs/container-registry/outputs.tf b/iac/envs/container-registry/outputs.tf deleted file mode 100644 index ee82266..0000000 --- a/iac/envs/container-registry/outputs.tf +++ /dev/null @@ -1,12 +0,0 @@ -# Container Registry outputs - -output "registry" { - description = "Registry details" - value = module.registry.registry -} - -output "registry_credentials" { - description = "Docker credentials for the registry" - value = module.registry.registry_credentials - sensitive = true -} \ No newline at end of file diff --git a/iac/envs/container-registry/terraform.tf b/iac/envs/container-registry/terraform.tf deleted file mode 100644 index 9170d4c..0000000 --- a/iac/envs/container-registry/terraform.tf +++ /dev/null @@ -1,10 +0,0 @@ -terraform { - cloud { - # TODO (replace with your own organization) - organization = "krondor-corp" - - workspaces { - name = "container-registry" - } - } -} \ No newline at end of file diff --git a/iac/envs/container-registry/variables.tf b/iac/envs/container-registry/variables.tf deleted file mode 100644 index d6b8e25..0000000 --- a/iac/envs/container-registry/variables.tf +++ /dev/null @@ -1,38 +0,0 @@ -resource "random_string" "project_prefix" { - length = 4 - special = false -} - -locals { - project_name = "generic" -} - -variable "default_lifecycle_policy" { - description = "Default lifecycle policy to apply to all 
repositories" - type = string - default = < "${var.docker_hub_username}/${repo.name}" + } + description = "Docker Hub repository URLs" +} + +output "registry_url" { + value = "docker.io" + description = "Docker Hub registry URL" +} diff --git a/iac/stages/container-registry/providers.tf b/iac/stages/container-registry/providers.tf new file mode 100644 index 0000000..4391f5a --- /dev/null +++ b/iac/stages/container-registry/providers.tf @@ -0,0 +1,11 @@ +# Provider configuration for production environment +terraform { + required_version = ">= 1.0" + + required_providers { + dockerhub = { + source = "artificialinc/dockerhub" + version = "~> 0.0.15" + } + } +} diff --git a/iac/stages/container-registry/terraform.tf b/iac/stages/container-registry/terraform.tf new file mode 100644 index 0000000..a950a83 --- /dev/null +++ b/iac/stages/container-registry/terraform.tf @@ -0,0 +1,9 @@ +terraform { + cloud { + organization = "krondor-generic-org" + + workspaces { + name = "krondor-generic-container-registry" + } + } +} diff --git a/iac/stages/container-registry/variables.tf b/iac/stages/container-registry/variables.tf new file mode 100644 index 0000000..08e8318 --- /dev/null +++ b/iac/stages/container-registry/variables.tf @@ -0,0 +1,27 @@ +variable "docker_hub_username" { + description = "Docker Hub username or organization name" + type = string + sensitive = false +} + +variable "docker_hub_password" { + description = "Docker Hub personal access token" + type = string + sensitive = true +} + +variable "project_name" { + description = "Project name for repository naming" + type = string +} + +variable "services" { + description = "Services configuration from .env.project (format: service:subdomain,service:subdomain)" + type = string +} + +variable "use_private_repos" { + description = "Whether to create private repositories" + type = bool + default = false +} diff --git a/iac/stages/production/.gitignore b/iac/stages/production/.gitignore new file mode 100644 index 0000000..e729079 --- /dev/null +++ b/iac/stages/production/.gitignore @@ -0,0 +1,29 @@ +# Terraform files +*.tfstate +*.tfstate.* +*.tfplan +.terraform/ +.terraform.lock.hcl + +# SSH keys - NEVER commit these! 
+ssh-keys/ +*.pem +*.key +id_rsa* + +# Local override files +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Sensitive variable files +*.auto.tfvars +terraform.tfvars + +# Crash log +crash.log +crash.*.log + +# Outputs +outputs.json \ No newline at end of file diff --git a/iac/stages/production/main.tf b/iac/stages/production/main.tf new file mode 100644 index 0000000..7a8f27a --- /dev/null +++ b/iac/stages/production/main.tf @@ -0,0 +1,23 @@ +# Production Infrastructure - Simplified Digital Ocean Droplet + +locals { + environment = "production" +} + +module "common" { + source = "../../modules/common" + + project_name = var.project_name + environment = local.environment + subdomains = var.subdomains + digitalocean = { + droplet = { + region = "nyc3" + } + } + cloudflare = { + ttl = 300 + proxied = false + dns_root_zone = var.dns_root_zone + } +} \ No newline at end of file diff --git a/iac/stages/production/outputs.tf b/iac/stages/production/outputs.tf new file mode 100644 index 0000000..a55cfc9 --- /dev/null +++ b/iac/stages/production/outputs.tf @@ -0,0 +1,11 @@ +output "ssh_private_key" { + description = "Private SSH key for accessing the droplet" + value = module.common.digitalocean_ssh_private_key + sensitive = true +} + +output "server_ip" { + description = "The IP address of the droplet" + value = module.common.digitalocean_droplet_ip +} + diff --git a/iac/stages/production/providers.tf b/iac/stages/production/providers.tf new file mode 100644 index 0000000..93f1958 --- /dev/null +++ b/iac/stages/production/providers.tf @@ -0,0 +1,15 @@ +# Provider configuration for production environment +terraform { + required_version = ">= 1.0" + + required_providers { + tls = { + source = "hashicorp/tls" + version = "~> 4.0" + } + local = { + source = "hashicorp/local" + version = "~> 2.0" + } + } +} \ No newline at end of file diff --git a/iac/stages/production/terraform.tf b/iac/stages/production/terraform.tf new file mode 100644 index 0000000..c05c7c4 --- /dev/null +++ b/iac/stages/production/terraform.tf @@ -0,0 +1,9 @@ +terraform { + cloud { + organization = "krondor-generic-org" + + workspaces { + name = "krondor-generic-production" + } + } +} diff --git a/iac/stages/production/variables.tf b/iac/stages/production/variables.tf new file mode 100644 index 0000000..1af2398 --- /dev/null +++ b/iac/stages/production/variables.tf @@ -0,0 +1,20 @@ +# Production environment variables +variable "project_name" { + description = "The name of the project" + type = string +} + +variable "services" { + description = "Services configuration from .env.project (format: service:subdomain,service:subdomain)" + type = string +} + +variable "dns_root_zone" { + description = "Root DNS zone for the project" + type = string +} + +variable "subdomains" { + description = "Comma-separated list of subdomains to create DNS records for (@ for root)" + type = string +} diff --git a/py/.env.example b/py/.env.example index 9a47a3a..ab86630 100644 --- a/py/.env.example +++ b/py/.env.example @@ -17,12 +17,15 @@ LISTEN_ADDRESS=0.0.0.0 LISTEN_PORT=8000 # Google OAuth credentials (optional, for SSO) -GOOGLE_CLIENT_ID=your-google-client-id -GOOGLE_CLIENT_SECRET=your-google-client-secret +GOOGLE_O_AUTH_CLIENT_ID=your-google-client-id +GOOGLE_O_AUTH_CLIENT_SECRET=your-google-client-secret # Auth redirect URI (defaults to {HOST_NAME}/auth/google/callback) AUTH_REDIRECT_URI=http://localhost:8000/auth/google/callback +# Marketing site URL (for linking back from login page, defaults to http://localhost:3000) 
+MARKETING_SITE_URL=http://localhost:3000 + # Debug mode DEBUG=True diff --git a/py/.gitignore b/py/.gitignore index 6db728a..eacecd0 100644 --- a/py/.gitignore +++ b/py/.gitignore @@ -2,4 +2,10 @@ __pycache__ .ruff_cache .pytest_cache -.mypy_cache \ No newline at end of file +.mypy_cache + +# Copied branding assets (source is in ../branding/assets/) +static/favicon.ico +static/favicon.png +static/icon.png +static/icon.svg \ No newline at end of file diff --git a/py/Dockerfile b/py/Dockerfile index a2aebcd..ce6c1d5 100644 --- a/py/Dockerfile +++ b/py/Dockerfile @@ -1,46 +1,48 @@ -# Use an official Python runtime as the base image -FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim +# NOTE: Run 'make styles-py' before building this image +# This Dockerfile expects pre-built static assets in py/static/ + +FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS app -# Set the working directory in the container WORKDIR /app # Install system dependencies RUN apt-get update && apt-get install -y \ gcc \ + make \ && rm -rf /var/lib/apt/lists/* -# Copy the bin scripts -COPY bin/ ./bin/ +# Copy the bin scripts from platform root (scripts expect to be 2 levels deep) +COPY bin/ ../bin/ +COPY py/bin/ ./bin/ -# Make sure the scripts are executable -RUN chmod +x ./bin/* +# Make sure the scripts are executable (including subdirectories) +RUN chmod +x ../bin/* && \ + find ./bin -type f -name "*.sh" -exec chmod +x {} \; # Copy the requirements file into the container -COPY uv.lock . -COPY pyproject.toml . +COPY py/uv.lock . +COPY py/pyproject.toml . # Install the Python dependencies -RUN /app/bin/install.sh -# TODO: add migration back in +RUN uv sync + # Copy the application code -COPY src/ ./src/ +COPY py/src/ ./src/ # Copy the alembic artifacts -COPY alembic/ ./alembic/ -COPY alembic.ini ./alembic.ini +COPY py/alembic/ ./alembic/ +COPY py/alembic.ini ./alembic.ini -# Copy the static assets -COPY static/ ./static/ +# Copy pre-built static assets (run 'make styles-py' first) +COPY py/static/ ./static/ # Copy the html templates -COPY templates/ ./templates/ +COPY py/templates/ ./templates/ -# NOTE: you must set the DATABASE_PATH environment variable to the path to the database file # Create a startup script -# TODO: add migration back in RUN echo '#!/bin/bash' > /app/start.sh && \ - echo '/app/bin/migrate.sh' >> /app/start.sh && \ - echo '/app/bin/run.sh' >> /app/start.sh && \ + echo '/app/bin/db.sh migrate' >> /app/start.sh && \ + echo '/app/bin/app/run.sh' >> /app/start.sh && \ chmod +x /app/start.sh # Expose the port the app runs on diff --git a/py/Makefile b/py/Makefile index 0f470cf..616ad58 100644 --- a/py/Makefile +++ b/py/Makefile @@ -3,23 +3,11 @@ help: ## Show this help message @echo 'Usage: make [target]' @echo '' @echo 'Available targets:' - @echo ' dev: Run development server' - @echo ' install: Install dependencies' - @echo ' build: Build the project' - @echo ' test: Run tests' - @echo ' lint: Run linter (ruff)' - @echo ' fmt: Format code (black)' - @echo ' fmt-check: Check code formatting' - @echo ' types: Check types (mypy/pyright)' - @echo ' check: Run all checks' - @echo ' docker-build: Build Docker image' - @echo ' clean: Clean build artifacts' - @echo ' tailwind: Build Tailwind CSS' - @echo ' tailwind-watch: Watch Tailwind CSS' + @awk 'BEGIN {FS = ":.*##"; printf "\n"} /^[a-zA-Z_-]+:.*?##/ { printf " %-20s %s\n", $$1, $$2 }' $(MAKEFILE_LIST) .PHONY: dev -dev: ## Run development server - @./bin/dev.sh +dev: styles ## Run development server (builds styles first) + @./bin/app/dev.sh .PHONY: 
install install: ## Install dependencies @@ -31,32 +19,36 @@ build: ## Build the project .PHONY: test test: ## Run tests - @./bin/test.sh + @./bin/ci/test.sh .PHONY: lint lint: ## Run linter (ruff) - @./bin/lint.sh + @./bin/ci/lint.sh .PHONY: fmt fmt: ## Format code (black) - @./bin/fmt.sh + @./bin/ci/fmt.sh .PHONY: fmt-check fmt-check: ## Check code formatting - @./bin/fmt.sh --check + @./bin/ci/fmt.sh --check .PHONY: types types: ## Check types (mypy/pyright) - @./bin/types.sh + @./bin/ci/types.sh + +# Database management - pass all arguments after 'db' to the postgres script +.PHONY: db +db: ## Database management - pass all arguments after 'db' to the postgres script + @./bin/db.sh $(filter-out $@,$(MAKECMDGOALS)) + +# Catch additional arguments to db command +%: + @: .PHONY: check check: fmt-check lint types test ## Run all checks -# TODO (config): target with configurable image name -.PHONY: docker-build -docker-build: ## Build Docker image - @docker build -t py-app . - .PHONY: clean clean: ## Clean build artifacts @echo "Cleaning Python build artifacts..." @@ -69,11 +61,16 @@ clean: ## Clean build artifacts @find . -type f -name "*.pyc" -delete @find . -type f -name ".coverage" -delete @find . -type f -name ".venv" -delete + @echo "Cleaning built styles and assets..." + @rm -rf static/css/ + @rm -f static/*.ico static/*.png static/*.svg + @rm -rf .venv + @echo "✓ Clean complete" -.PHONY: tailwind -tailwind: ## Build Tailwind CSS - @./bin/tailwind.sh +.PHONY: styles +styles: ## Build styles with Tailwind CSS and copy assets (USE_SYMLINKS=1 for symlinks) + @./bin/styles.sh -.PHONY: tailwind-watch -tailwind-watch: ## Watch Tailwind CSS - @./bin/tailwind.sh -w \ No newline at end of file +.PHONY: styles-watch +styles-watch: ## Watch and rebuild styles on changes + @./bin/styles.sh --watch diff --git a/py/README.md b/py/README.md index 7974c6d..cccd48f 100644 --- a/py/README.md +++ b/py/README.md @@ -1,58 +1,261 @@ # muze -## requirements +## Requirements - python 3.12 - uv - hcp (for vault secrets) - tailwindcss@3 +- PostgreSQL (via Docker/Podman for local development) -## development +## 🚀 Quick Start -fmt +```bash +# Install dependencies +make install +# Start development server (with 1Password vault integration) +make dev + +# Run the project +make run + +# Database operations +make db up # Start PostgreSQL container +make db migrate # Run migrations (auto-starts DB) +make db prepare "Add user table" # Create new migration +MANUAL=1 make db prepare "Custom migration" # Create manual migration (important!) ``` -uv run black src + +## 📦 Database Management + +### Key Database Commands + +```bash +# Container management +make db up # Start PostgreSQL container +make db down # Stop and remove container/volumes +make db status # Check container status +make db connect # Open psql connection +make db endpoint # Get connection URL + +# Migrations - IMPORTANT: Use environment variables for flags! 
+make db migrate # Run migrations (auto-starts local DB if needed) +make db prepare "Description here" # Auto-generate migration from models +MANUAL=1 make db prepare "Description" # Create manual/empty migration template ``` -check +### ⚠️ IMPORTANT: Migration Examples with Flags + +The **RECOMMENDED** way to pass flags is through environment variables: +```bash +# Manual migration (IMPORTANT: Use MANUAL=1 prefix) +MANUAL=1 make db prepare "Add custom indexes" +MANUAL=true make db prepare "Data migration script" + +# Direct script call (always works) +./bin/db.sh prepare --manual "Complex data transformation" ``` -uv run ruff check src + +### Why Use Environment Variables for Flags? + +Due to how Make processes arguments, flags like `--manual` don't pass through correctly with the standard `make db prepare` syntax. Using `MANUAL=1` ensures the flag is properly recognized by the underlying script. + +## 🔧 Development Commands + +### Code Quality + +```bash +# Formatting +make fmt # Auto-format code with black +make fmt-check # Check formatting without changes + +# Linting +make lint # Run ruff linter + +# Type checking +make types # Run mypy type checker + +# Run all quality checks +make check # Runs fmt-check, lint, types, and test ``` -mypy +### Testing + +```bash +# Run all tests (auto-starts PostgreSQL if needed) +make test +# Direct test command with options +uv run pytest tests/ -v # Verbose output ``` -uv run mypy src + +## 🎯 Application Commands + +```bash +# Development server (with vault integration) +make dev # Runs with 1Password vault for secrets +# or directly: +./bin/app/dev.sh + +# Production-like run +make run # Standard run without vault +# or directly: +./bin/app/run.sh + +# Install/update dependencies +make install # Uses uv to sync dependencies ``` -dev +## 🎨 Styling and Assets + +```bash +# Build Tailwind CSS styles +make styles # Build and minify CSS, copy brand assets +# Watch mode for development +make styles-watch # Auto-rebuild on style changes ``` -./bin/dev.sh + +## 🧹 Utilities + +```bash +# Clean build artifacts +make clean # Remove __pycache__, .pyc, coverage, etc. + +# Show help +make help # Display all available targets with descriptions ``` -run +## 🔐 Environment Variables + +### Database +- `POSTGRES_URL` - PostgreSQL connection string (auto-set by db commands) +- `DATABASE_URL` - Alternative name for POSTGRES_URL +### Migration Flags (IMPORTANT!) +- `MANUAL=1` or `MANUAL=true` - Create manual migration template (prefix to command!) + +### Vault Integration +The project uses 1Password for secret management. Secrets are loaded from `.env.vault` when using `make dev`. + +## 📋 Example Workflows + +### Start fresh development environment +```bash +make clean +make install +make db up +make db migrate +make dev ``` -./bin/run.sh + +### Create and apply a new migration +```bash +# Auto-generate from model changes +make db prepare "Add user profile fields" + +# Or create manual migration (NOTE THE PREFIX!) +MANUAL=1 make db prepare "Custom data migration" + +# Apply the migration +make db migrate ``` -prepare alembic +### Run full test suite +```bash +make check # Runs fmt-check, lint, types, and test +``` +### Development with live reload +```bash +# Terminal 1 - Start database and app +make db up +make dev + +# Terminal 2 - Watch styles +make styles-watch ``` -./bin/prepare_migrations.sh + +## 💡 Pro Tips + +1. **⚡ Always use environment variables for flags**: `MANUAL=1 make db prepare "..."` + - This is the most reliable way to pass flags through Make + +2. 
**🔄 Database auto-starts**: Migration commands will start PostgreSQL automatically if needed + - No need to run `make db up` before `make db migrate` + +3. **🔑 Vault integration**: Use `make dev` for development with 1Password secrets + - Automatically loads secrets from your vault + +4. **🎯 Direct script access**: All scripts in `bin/` can be called directly for more control + - Example: `./bin/db.sh prepare --manual "My migration"` + +## 🐛 Troubleshooting + +### Make command shows "Unknown command" +- Make sure you're in the `py/` directory +- Check that the command exists in the Makefile + +### Database connection issues +```bash +make db status # Check if container is running +make db up # Start container +make db endpoint # Get connection URL ``` -migrate +### Migration issues +```bash +# View migration history +uv run alembic history +# Check current revision +uv run alembic current + +# Downgrade if needed (direct command) +uv run alembic downgrade -1 ``` -./bin/migrate.sh + +### Port conflicts +If port 5432 is already in use: +```bash +lsof -i :5432 # Check what's using the port +make db down # Stop our container +# Then stop the conflicting service ``` -tailwind +### Manual migration not working? +Remember: **Always prefix with MANUAL=1** +```bash +# ✅ Correct +MANUAL=1 make db prepare "My manual migration" +# ❌ Won't work +make db prepare --manual "My manual migration" ``` -./bin/tailwind.sh -w + +## 📚 Direct Script Reference + +For more control, you can always use the scripts directly: + +```bash +# Database management +./bin/db.sh up # Start PostgreSQL +./bin/db.sh prepare --manual "Description" # Manual migration +./bin/db.sh migrate # Run migrations + +# CI scripts +./bin/ci/fmt.sh # Format code +./bin/ci/lint.sh # Run linter +./bin/ci/types.sh # Type checking +./bin/ci/test.sh # Run tests + +# Application +./bin/app/dev.sh # Development server +./bin/app/run.sh # Production-like run ``` + +## 📖 Full Makefile Command Reference + +Run `make help` to see all available commands with descriptions. diff --git a/py/alembic.ini b/py/alembic.ini index ce6cebc..4f7df9a 100644 --- a/py/alembic.ini +++ b/py/alembic.ini @@ -8,7 +8,7 @@ script_location = alembic # Uncomment the line below if you want the files to be prepended with date and time # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file # for all available tokens -# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s +file_template = %%(year)d%%(month).2d%%(day).2d_%%(hour).2d%%(minute).2d%%(second).2d_%%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. diff --git a/py/alembic/versions/64d96d2d5ba4_init.py b/py/alembic/versions/20250926_113958_74e9156d0365_init.py similarity index 90% rename from py/alembic/versions/64d96d2d5ba4_init.py rename to py/alembic/versions/20250926_113958_74e9156d0365_init.py index 94d4e1f..d5cb603 100644 --- a/py/alembic/versions/64d96d2d5ba4_init.py +++ b/py/alembic/versions/20250926_113958_74e9156d0365_init.py @@ -1,8 +1,8 @@ """init -Revision ID: 64d96d2d5ba4 +Revision ID: 74e9156d0365 Revises: -Create Date: 2025-06-03 22:48:09.676036 +Create Date: 2025-09-26 11:39:58.852327 """ from typing import Sequence, Union @@ -12,7 +12,7 @@ # revision identifiers, used by Alembic. 
-revision: str = '64d96d2d5ba4' +revision: str = '74e9156d0365' down_revision: Union[str, None] = None branch_labels: Union[str, Sequence[str], None] = None depends_on: Union[str, Sequence[str], None] = None diff --git a/py/bin/app/dev.sh b/py/bin/app/dev.sh new file mode 100755 index 0000000..0dbd234 --- /dev/null +++ b/py/bin/app/dev.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../.." && pwd )" +source "$PROJECT_ROOT/bin/config" +source "$PROJECT_ROOT/bin/vault" + +# start db flag +START_DB=false + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + -d | --db) + START_DB=true + shift + ;; + -h | --help) + echo "Usage: $0 [options]" + echo "" + echo "Options:" + echo " -d, --db Start PostgreSQL" + echo " -h, --help Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# Set environment variables +export DEV_SERVER_HOST=localhost +export HOST_NAME=http://localhost:8000 +export LISTEN_ADDRESS=0.0.0.0 +export LISTEN_PORT=8000 +export DEBUG=True +export DEV_MODE=True +export LOG_PATH= + +# Start PostgreSQL +if [ "$START_DB" = true ]; then + echo "Starting PostgreSQL..." + ./bin/db.sh up + # Give PostgreSQL a moment to start + sleep 2 +fi + +export POSTGRES_URL=$(./bin/db.sh endpoint) + +# Start the main application +echo "Running application..." +run_with_vault --stage development -- uv run src/__main__.py + +# Exit the script +exit 0 diff --git a/py/bin/app/run.sh b/py/bin/app/run.sh new file mode 100644 index 0000000..ab483be --- /dev/null +++ b/py/bin/app/run.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +export LISTEN_ADDRESS=0.0.0.0 +export LISTEN_PORT=8000 +export DEBUG=False + +uv run src/__main__.py + +# Exit the script +exit 0 diff --git a/py/bin/check.sh b/py/bin/check.sh deleted file mode 100755 index 9553cc7..0000000 --- a/py/bin/check.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bash - -# Comprehensive check script that runs all code quality tools - -# Source shared utilities -source "$(dirname "$0")/utils.sh" - -# Parse arguments -FIX_MODE=false -VERBOSE=false - -while [[ $# -gt 0 ]]; do - case $1 in - -f|--fix) - FIX_MODE=true - shift - ;; - -v|--verbose) - VERBOSE=true - shift - ;; - -h|--help) - echo "Usage: $0 [options]" - echo "" - echo "Options:" - echo " -f, --fix Auto-fix issues where possible" - echo " -v, --verbose Show detailed output" - echo " -h, --help Show this help message" - echo "" - echo "This script runs all code quality checks including:" - echo " - Type checking (mypy)" - echo " - Linting (ruff)" - echo " - Formatting (black)" - echo " - Tests (pytest)" - exit 0 - ;; - *) - echo -e "${RED}Unknown option: $1${NC}" - echo "Use --help for usage information" - exit 1 - ;; - esac -done - -# Track overall status -OVERALL_SUCCESS=0 - -# Function to run a check with proper error handling -run_check() { - local name=$1 - local script=$2 - local args=$3 - - print_header "$name" - - if $VERBOSE; then - $script $args - else - # Capture output for non-verbose mode - output=$($script $args 2>&1) - result=$? - fi - - result=$? - if [ $result -eq 0 ]; then - echo -e "${GREEN} $name passed${NC}" - else - echo -e "${RED} $name failed${NC}" - if ! 
$VERBOSE; then - echo "$output" - fi - OVERALL_SUCCESS=1 - fi - echo - - return $result -} - -# Type checking -if $FIX_MODE; then - echo -e "${YELLOW}Note: Type checking has no auto-fix mode${NC}" -fi -run_check "Type Checking (MyPy)" "$PROJECT_ROOT/bin/types.sh" "" - -# Linting -if $FIX_MODE; then - run_check "Linting (Ruff)" "$PROJECT_ROOT/bin/lint.sh" "--fix" -else - run_check "Linting (Ruff)" "$PROJECT_ROOT/bin/lint.sh" "" -fi - -# Formatting -if $FIX_MODE; then - run_check "Formatting (Black)" "$PROJECT_ROOT/bin/fmt.sh" "" -else - run_check "Formatting (Black)" "$PROJECT_ROOT/bin/fmt.sh" "--check" -fi - -# Tests -run_check "Tests (Pytest)" "$PROJECT_ROOT/bin/test.sh" "" - -# Final summary -print_header "Summary" - -if [ $OVERALL_SUCCESS -eq 0 ]; then - echo -e "${GREEN} All checks passed!${NC}" -else - echo -e "${RED}L Some checks failed${NC}" - echo - echo "Tips:" - if $FIX_MODE; then - echo " - Some issues were auto-fixed, run again to verify" - else - echo " - Run with --fix to auto-fix formatting and linting issues" - fi - echo " - Run individual check scripts for more details:" - echo " - ./bin/types.sh # Type checking" - echo " - ./bin/lint.sh # Linting" - echo " - ./bin/fmt.sh # Formatting" - echo " - ./bin/test.sh # Tests" -fi - -exit $OVERALL_SUCCESS \ No newline at end of file diff --git a/py/bin/fmt.sh b/py/bin/ci/fmt.sh similarity index 63% rename from py/bin/fmt.sh rename to py/bin/ci/fmt.sh index 9760445..e139ab4 100755 --- a/py/bin/fmt.sh +++ b/py/bin/ci/fmt.sh @@ -2,8 +2,11 @@ # Formatting script for the project using black -# Source shared utilities -source "$(dirname "$0")/utils.sh" +# Get project root (going up from py/bin/ci to root) +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../.." && pwd )" + +# Source shared utilities from project root +source "$PROJECT_ROOT/bin/utils" # Parse arguments CHECK_ONLY=false @@ -15,13 +18,13 @@ if $CHECK_ONLY; then print_header "Running Formatters in Check Mode" echo "Checking black formatting..." - uvx black "$PROJECT_ROOT/src" "$PROJECT_ROOT/tests" --line-length 88 --check + uvx black "$PROJECT_ROOT/py/src" "$PROJECT_ROOT/py/tests" --line-length 88 --check check_result "Black check" else print_header "Running Code Formatters" echo "Formatting with black..." - uvx black "$PROJECT_ROOT/src" "$PROJECT_ROOT/tests" --line-length 88 + uvx black "$PROJECT_ROOT/py/src" "$PROJECT_ROOT/py/tests" --line-length 88 check_result "Black format" fi diff --git a/py/bin/lint.sh b/py/bin/ci/lint.sh similarity index 63% rename from py/bin/lint.sh rename to py/bin/ci/lint.sh index e46d89d..a444203 100755 --- a/py/bin/lint.sh +++ b/py/bin/ci/lint.sh @@ -2,8 +2,11 @@ # Linting script for the project using ruff -# Source shared utilities -source "$(dirname "$0")/utils.sh" +# Get project root (going up from py/bin/ci to root) +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../.." && pwd )" + +# Source shared utilities from project root +source "$PROJECT_ROOT/bin/utils" # Parse arguments FIX_MODE=false @@ -15,13 +18,13 @@ if $FIX_MODE; then print_header "Running Ruff with Auto-fix" echo "Fixing linting issues..." - uvx ruff check "$PROJECT_ROOT/src" "$PROJECT_ROOT/tests" --fix + uvx ruff check "$PROJECT_ROOT/py/src" "$PROJECT_ROOT/py/tests" --fix check_result "Ruff auto-fix" else print_header "Running Ruff Linter" echo "Checking for linting issues..." 
- uvx ruff check "$PROJECT_ROOT/src" "$PROJECT_ROOT/tests" + uvx ruff check "$PROJECT_ROOT/py/src" "$PROJECT_ROOT/py/tests" check_result "Ruff check" fi diff --git a/py/bin/test.sh b/py/bin/ci/test.sh similarity index 87% rename from py/bin/test.sh rename to py/bin/ci/test.sh index 805e10e..659e9c1 100755 --- a/py/bin/test.sh +++ b/py/bin/ci/test.sh @@ -2,8 +2,11 @@ # Test script for running project tests -# Source utilities -source "$(dirname "$0")/utils.sh" +# Get project root (going up from py/bin/ci to root) +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../.." && pwd )" + +# Source shared utilities from project root +source "$PROJECT_ROOT/bin/utils" # Default values VERBOSE=false @@ -53,17 +56,17 @@ if [ -n "$POSTGRES_URL" ]; then echo " Database: $(echo $POSTGRES_URL | sed 's/postgresql:\/\/[^@]*@/postgresql:\/\/***@/')" else # Try to get it from running container - if ./bin/postgres.sh endpoint >/dev/null 2>&1; then - export POSTGRES_URL=$(./bin/postgres.sh endpoint) + if ./bin/db.sh endpoint >/dev/null 2>&1; then + export POSTGRES_URL=$(./bin/db.sh endpoint) echo -e "${GREEN}✓ Using running PostgreSQL container${NC}" else # Start PostgreSQL echo "Starting PostgreSQL container..." - ./bin/postgres.sh run >/dev/null 2>&1 + ./bin/db.sh up >/dev/null 2>&1 check_result "PostgreSQL startup" # Get the endpoint - export POSTGRES_URL=$(./bin/postgres.sh endpoint) + export POSTGRES_URL=$(./bin/db.sh endpoint) echo -e "${GREEN}✓ Started new PostgreSQL container${NC}" fi echo " Database: $(echo $POSTGRES_URL | sed 's/postgresql:\/\/[^@]*@/postgresql:\/\/***@/')" diff --git a/py/bin/types.sh b/py/bin/ci/types.sh similarity index 70% rename from py/bin/types.sh rename to py/bin/ci/types.sh index a830c0d..4d77769 100755 --- a/py/bin/types.sh +++ b/py/bin/ci/types.sh @@ -2,8 +2,11 @@ # Type checking script for the project using mypy -# Source shared utilities -source "$(dirname "$0")/utils.sh" +# Get project root (going up from py/bin/ci to root) +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../.." && pwd )" + +# Source shared utilities from project root +source "$PROJECT_ROOT/bin/utils" # Parse arguments STRICT_MODE=false @@ -15,18 +18,18 @@ if $STRICT_MODE; then print_header "Running MyPy in Strict Mode" echo "Type checking with strict settings..." - uvx mypy "$PROJECT_ROOT/src" --strict + uvx mypy "$PROJECT_ROOT/py/src" --strict check_result "MyPy strict check" else print_header "Running MyPy Type Checker" echo "Type checking src directory..." - uvx mypy "$PROJECT_ROOT/src" + uvx mypy "$PROJECT_ROOT/py/src" check_result "MyPy check" fi # Check if there's a mypy cache to report on -if [ -d "$PROJECT_ROOT/.mypy_cache" ]; then +if [ -d "$PROJECT_ROOT/py/.mypy_cache" ]; then echo echo "MyPy cache exists (use 'rm -rf .mypy_cache' to clear if needed)" fi diff --git a/py/bin/config.sh b/py/bin/config.sh deleted file mode 100755 index 90f9dad..0000000 --- a/py/bin/config.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Get the directory where this script is located -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -PROJECT_ROOT="$( cd "$SCRIPT_DIR/.." 
&& pwd )" - -# Function to read value from YAML file -# Usage: read_yaml_value "key.subkey" -read_yaml_value() { - local key="$1" - local file="$PROJECT_ROOT/hcp.yaml" - - # For simple top-level keys - if [[ "$key" == "vault.apps.dev" ]]; then - awk '/^vault:/{f=1} f && /apps:/{g=1} g && /dev:/{print $2; exit}' "$file" - fi -} - -# Export app configuration -export APP_NAME=generic -export DEV_SERVER_PORT=8000 -export VAULT_APP_DEV=generic-dev - -function print_config { - echo "APP_NAME: $APP_NAME" - echo "VAULT_APP_DEV: $VAULT_APP_DEV" -} - -# if directly called, print the config -if [ "$0" == "$BASH_SOURCE" ]; then - print_config -fi \ No newline at end of file diff --git a/py/bin/postgres.sh b/py/bin/db.sh similarity index 60% rename from py/bin/postgres.sh rename to py/bin/db.sh index 2fde3b4..934f31f 100755 --- a/py/bin/postgres.sh +++ b/py/bin/db.sh @@ -1,27 +1,29 @@ #!/usr/bin/env bash # Script to manage a local PostgreSQL container for development (Mac-optimized) +# TODO (amiller68): on a rainy day, i'd love +# to move this into a nested dir + set -o errexit set -o nounset -# Source configuration and utilities -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "$SCRIPT_DIR/config.sh" -source "$SCRIPT_DIR/utils.sh" - -# NOTE (amiller68): we source APP_NAME from config.sh +# NOTE (amiller68): points back to the project root +PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )" +# which lets us easily source utils +source "$PROJECT_ROOT/bin/utils" +source "$PROJECT_ROOT/bin/config" -if [ -z "$APP_NAME" ]; then - echo -e "${RED}Error: APP_NAME is not set${NC}" +if [ -z "$PROJECT_NAME" ]; then + echo -e "${RED}Error: PROJECT_NAME is not set${NC}" exit 1 fi -POSTGRES_CONTAINER_NAME="${APP_NAME}-postgres" -POSTGRES_VOLUME_NAME="${APP_NAME}-postgres-data" +POSTGRES_CONTAINER_NAME="${PROJECT_NAME}-postgres" +POSTGRES_VOLUME_NAME="${PROJECT_NAME}-postgres-data" POSTGRES_PORT=5432 POSTGRES_USER=postgres POSTGRES_PASSWORD=postgres POSTGRES_IMAGE_NAME=postgres:17 -POSTGRES_DB="${APP_NAME}" +POSTGRES_DB="${PROJECT_NAME}" # Check if docker or podman is available CONTAINER_RUNTIME="docker" @@ -38,7 +40,7 @@ function check_runtime { } # Start local PostgreSQL for development -function run { +function up { check_runtime print_header "Starting PostgreSQL" @@ -140,22 +142,22 @@ function start_postgres_container { fi } -function clean { +function down { check_runtime print_header "Cleaning PostgreSQL Container" - + echo "Stopping PostgreSQL container..." $CONTAINER_RUNTIME stop $POSTGRES_CONTAINER_NAME 2>/dev/null || true check_result "Container stop" - + echo "Removing PostgreSQL container..." $CONTAINER_RUNTIME rm -f $POSTGRES_CONTAINER_NAME 2>/dev/null || true check_result "Container removal" - + echo "Removing PostgreSQL volume..." $CONTAINER_RUNTIME volume rm -f $POSTGRES_VOLUME_NAME 2>/dev/null || true check_result "Volume removal" - + print_summary "PostgreSQL cleaned up successfully!" "cleanup step(s) failed" } @@ -173,10 +175,98 @@ function connect { psql ""$(./bin/postgres.sh endpoint)"" } +function migrate { + print_header "Running Database Migrations" + + # Check if POSTGRES_URL is set (using ${VAR:-} to handle unset variable with nounset) + if [ -z "${POSTGRES_URL:-}" ]; then + echo -e "${YELLOW}POSTGRES_URL environment variable is not set${NC}" + echo -e "${YELLOW}Setting POSTGRES_URL environment variable to local container${NC}" + export POSTGRES_URL=$(endpoint) + fi + + echo "Running migrations with Alembic..." 
+
+    # Check if uv command exists
+    if ! command -v uv &>/dev/null; then
+        echo -e "${RED}Error: 'uv' command not found. Please install uv first.${NC}"
+        exit 1
+    fi
+
+    # Run the migrations
+    uv run alembic upgrade head
+    check_result "Database migrations"
+
+    print_summary "Migrations completed successfully!" "migration(s) failed"
+}
+
+function prepare {
+    print_header "Preparing Database Migration"
+
+    local manual=false
+    local description=""
+
+    # Check environment variable for manual flag
+    if [ "${MANUAL:-}" = "1" ] || [ "${MANUAL:-}" = "true" ]; then
+        manual=true
+    fi
+
+    # Parse arguments
+    while [[ "$#" -gt 0 ]]; do
+        case $1 in
+        --manual)
+            manual=true
+            shift
+            ;;
+        *)
+            # Concatenate all remaining arguments as the description
+            description="$*"
+            break
+            ;;
+        esac
+    done
+
+    # Check for description
+    if [ -z "$description" ]; then
+        echo -e "${RED}Error: Please provide a description for the migration.${NC}"
+        echo "Usage: $0 prepare [--manual] <description>"
+        echo "Example: $0 prepare 'Add user table'"
+        echo "Example: $0 prepare --manual 'Custom migration for data cleanup'"
+        exit 1
+    fi
+
+    # Start PostgreSQL if needed (for development)
+    up
+    export POSTGRES_URL=$(endpoint)
+    echo -e "${YELLOW}Using local PostgreSQL for migration generation${NC}"
+
+    # Check if uv command exists
+    if ! command -v uv &>/dev/null; then
+        echo -e "${RED}Error: 'uv' command not found. Please install uv first.${NC}"
+        exit 1
+    fi
+
+    # Generate alembic migrations
+    echo "Generating migration: $description"
+
+    # Run alembic revision and capture output
+    if [ "$manual" = true ]; then
+        echo "Creating manual migration..."
+        uv run alembic revision -m "$description"
+    else
+        echo "Auto-generating migration from model changes..."
+        uv run alembic revision --autogenerate -m "$description"
+    fi
+
+    check_result "Migration generation"
+
+    print_summary "Migration prepared successfully!" "migration preparation failed"
+}
+
 function status {
     check_runtime
     print_header "PostgreSQL Status"
-    
+
     if $CONTAINER_RUNTIME ps | grep -q "$POSTGRES_CONTAINER_NAME"; then
         echo -e "${GREEN}✓ PostgreSQL container is running.${NC}"
         echo ""
@@ -200,26 +290,37 @@ function status {
 }
 
 function help {
-    echo -e "${YELLOW}PostgreSQL Container Manager${NC}"
+    echo -e "${YELLOW}PostgreSQL Container Manager & Database Tools${NC}"
+    echo ""
+    echo "Usage: $0 [command] [options]"
     echo ""
-    echo "Usage: $0 [command]"
+    echo "Container Management Commands:"
+    echo "  up       - Start a local PostgreSQL container for development"
+    echo "  down     - Remove the PostgreSQL container and volume"
+    echo "  endpoint - Print the PostgreSQL connection URLs"
+    echo "  connect  - Connect to the postgres instance"
+    echo "  status   - Check container status and connection"
     echo ""
-    echo "Commands:"
-    echo "  run      - Start a local PostgreSQL container for development"
-    echo "  clean    - Remove the PostgreSQL container and volume"
-    echo "  endpoint - Print the PostgreSQL connection URLs"
-    echo "  connect  - Connect to the postgres instance"
-    echo "  status   - Check container status and connection"
-    echo "  help     - Show this help message"
+    echo "Migration Commands:"
+    echo "  migrate  - Run database migrations (auto-starts local DB if needed)"
+    echo "  prepare  - Create a new migration"
+    echo "             [--manual] <description>"
+    echo "             - Use --manual for manual migration (no auto-generate)"
     echo ""
-    echo "For production, set the DATABASE_URL environment variable."
+    echo "  help     - Show this help message"
+    echo ""
+    echo "For production, set the POSTGRES_URL environment variable."
} # Process command CMD=${1:-help} +shift || true # Shift to remove command from arguments + +# NOTE (amiller68): these cannot conflict with +# our make directives case "$CMD" in -run | clean | endpoint | connect | status | help) - $CMD +up | down | endpoint | connect | status | help | migrate | prepare) + $CMD "$@" ;; *) echo -e "${RED}Unknown command: $CMD${NC}" diff --git a/py/bin/dev.sh b/py/bin/dev.sh deleted file mode 100755 index 2e3784c..0000000 --- a/py/bin/dev.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -# Source configuration -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "$SCRIPT_DIR/config.sh" -source "$SCRIPT_DIR/../../bin/vault" - -# start db flag -START_DB=false - -# Parse command line arguments -while [[ $# -gt 0 ]]; do - case $1 in - -d|--db) - START_DB=true - shift - ;; - -h|--help) - echo "Usage: $0 [options]" - echo "" - echo "Options:" - echo " -d, --db Start PostgreSQL" - echo " -h, --help Show this help message" - exit 0 - ;; - *) - echo "Unknown option: $1" - echo "Use --help for usage information" - exit 1 - ;; - esac -done - -# Set environment variables -export DEV_SERVER_HOST=localhost -export HOST_NAME=http://localhost:${DEV_SERVER_PORT} -export LISTEN_ADDRESS=0.0.0.0 -export LISTEN_PORT=${DEV_SERVER_PORT} -export DEBUG=True -export DEV_MODE=True -export LOG_PATH= - -# Start PostgreSQL -if [ "$START_DB" = true ]; then - echo "Starting PostgreSQL..." - ./bin/postgres.sh run - # Give PostgreSQL a moment to start - sleep 2 -fi - -export POSTGRES_URL=$(./bin/postgres.sh endpoint) - -# Start the main application -# TODO (first-time-setup): replace with your own vault app -export VAULT_APP='generic-py-development' -run_with_vault uv run src/__main__.py - -# Exit the script -exit 0 diff --git a/py/bin/docker.sh b/py/bin/docker.sh deleted file mode 100755 index 8af57cd..0000000 --- a/py/bin/docker.sh +++ /dev/null @@ -1,294 +0,0 @@ -#!/usr/bin/env bash - -usage() { - cat </dev/null 2>&1; then - echo "Error: Image generic-py/$SERVICE_NAME:latest not found" - exit 1 - fi - - # Check image platform - PLATFORM=$(docker image inspect "generic-py/$SERVICE_NAME:latest" --format '{{.Os}}/{{.Architecture}}') - - # For deployment to AWS Fargate, we need to ensure the image is linux/amd64 - if [[ "$COMMAND" == "deploy" ]]; then - if [[ "$PLATFORM" != "linux/amd64" ]]; then - echo "Error: AWS Fargate requires linux/amd64 platform" - echo "Current platform: $PLATFORM" - echo "Please rebuild the image with: $0 build $SERVICE_NAME -p linux/amd64" - exit 1 - fi - # For other commands, either amd64 or arm64 is acceptable - elif [[ "$PLATFORM" != "linux/amd64" && "$PLATFORM" != "linux/arm64" ]]; then - echo "Error: Image must be built for linux/amd64 or linux/arm64" - echo "Current platform: $PLATFORM" - exit 1 - fi - - echo "Image platform validated: $PLATFORM" -} - -validate_branch() { - local current_branch=$1 - local is_dirty=$(git status --porcelain) - - # NEVER allow dirty commits from main and dev - if [[ -n "$is_dirty" ]] && [[ "$current_branch" =~ ^(main|dev)$ ]]; then - echo "Error: Dirty working directory not allowed for main or dev branches" - exit 1 - fi - - # For other branches, check dirty flag - if [[ -n "$is_dirty" ]] && [[ "$ALLOW_DIRTY" != 1 ]]; then - echo "Error: Working directory not clean" - exit 1 - fi - - # Check branch name restrictions if we're not skipping - if [[ "$SKIP_BRANCH_CHECK" != 1 ]]; then - if [[ ! 
"$current_branch" =~ ^(main|dev)$ ]]; then - echo "Error: Deployments only allowed from main or dev branches" - echo "Current branch: $current_branch" - echo "Use -S flag to bypass this restriction" - exit 1 - fi - fi -} - -# TODO: digital ocean integration -push() { - local branch_name=$1 - local is_dirty=$(git status --porcelain) - - COMMIT_HASH=$(git rev-parse --short HEAD) - - # Sanitize branch name for docker tag (replace / with -) - SAFE_BRANCH_NAME=${branch_name//\//-} - - # Generate a short hash of the branch name for uniqueness - BRANCH_HASH=$(echo "$SAFE_BRANCH_NAME" | md5sum | cut -c1-6) - - # Map branch to environment with short identifiers - if [[ "$SAFE_BRANCH_NAME" == "main" ]]; then - ENV_NAME="production" - elif [[ "$SAFE_BRANCH_NAME" == "dev" ]]; then - ENV_NAME="staging" - else - # Extract branch type (feature, fix, etc.) and create a short slug - if [[ "$SAFE_BRANCH_NAME" =~ ^feature- ]]; then - ENV_NAME="staging-feat-${BRANCH_HASH}" - elif [[ "$SAFE_BRANCH_NAME" =~ ^fix- ]]; then - ENV_NAME="staging-fix-${BRANCH_HASH}" - elif [[ "$SAFE_BRANCH_NAME" =~ ^release- ]]; then - ENV_NAME="staging-rel-${BRANCH_HASH}" - else - # For any other branch, use a generic prefix with hash - ENV_NAME="staging-br-${BRANCH_HASH}" - fi - - # Print mapping to console for reference - echo "Branch mapping: $SAFE_BRANCH_NAME → $ENV_NAME" - fi - - # Add dirty flag if working directory is not clean - if [[ -n "$is_dirty" ]]; then - ENV_NAME="${ENV_NAME}-dirty" - echo "Working directory not clean, adding -dirty suffix" - fi - - # Tag with environment name and commit hash - if ! docker tag generic-py/$SERVICE_NAME:latest $REPOSITORY_URL:$ENV_NAME-$COMMIT_HASH || - ! docker tag generic-py/$SERVICE_NAME:latest $REPOSITORY_URL:$ENV_NAME-latest; then - echo "Error: Failed to tag images" - exit 1 - fi - - # For main/production or when force latest is enabled, tag as 'latest' - if [[ "$ENV_NAME" == "production" ]] || [[ "$FORCE_LATEST" == 1 ]]; then - if ! docker tag generic-py/$SERVICE_NAME:latest $REPOSITORY_URL:latest; then - echo "Error: Failed to tag as latest" - exit 1 - fi - if [[ "$FORCE_LATEST" == 1 ]]; then - echo "Warning: Forcing push to latest tag from non-production branch" - fi - fi - - # For custom branches, optionally tag as staging-latest - if [[ "$TAG_AS_STAGING_LATEST" == 1 ]] && [[ ! "$SAFE_BRANCH_NAME" =~ ^(main|dev)$ ]]; then - if ! docker tag generic-py/$SERVICE_NAME:latest $REPOSITORY_URL:staging-latest; then - echo "Error: Failed to tag as staging-latest" - exit 1 - fi - fi - - # Push all tags - PUSHED_TAGS=() - - if docker push $REPOSITORY_URL:$ENV_NAME-$COMMIT_HASH; then - PUSHED_TAGS+=("$REPOSITORY_URL:$ENV_NAME-$COMMIT_HASH") - else - echo "Error: Failed to push $REPOSITORY_URL:$ENV_NAME-$COMMIT_HASH" - exit 1 - fi - - if docker push $REPOSITORY_URL:$ENV_NAME-latest; then - PUSHED_TAGS+=("$REPOSITORY_URL:$ENV_NAME-latest") - else - echo "Error: Failed to push $REPOSITORY_URL:$ENV_NAME-latest" - exit 1 - fi - - # Push 'latest' tag for production or when force latest is enabled - if [[ "$ENV_NAME" == "production" ]] || [[ "$FORCE_LATEST" == 1 ]]; then - if docker push $REPOSITORY_URL:latest; then - PUSHED_TAGS+=("$REPOSITORY_URL:latest") - else - echo "Error: Failed to push latest tag" - exit 1 - fi - fi - - # Push staging-latest for custom branches if requested - if [[ "$TAG_AS_STAGING_LATEST" == 1 ]] && [[ ! 
"$SAFE_BRANCH_NAME" =~ ^(main|dev)$ ]]; then - if docker push $REPOSITORY_URL:staging-latest; then - PUSHED_TAGS+=("$REPOSITORY_URL:staging-latest") - else - echo "Error: Failed to push staging-latest tag" - exit 1 - fi - fi - - echo "Pushed images:" - for tag in "${PUSHED_TAGS[@]}"; do - echo " - $tag" - done -} - -# Main command processing -COMMAND=$1 -shift 1 - -case $COMMAND in -build) - # Default to host platform if not specified - PLATFORM="" - - # Parse build-specific options - while getopts "p:" opt; do - case $opt in - p) PLATFORM="--platform=$OPTARG" ;; - ?) usage ;; - esac - done - - echo "Building docker container for $SERVICE_NAME..." - if [ -n "$PLATFORM" ]; then - echo "Using platform: $PLATFORM" - docker build $PLATFORM -t generic-py/$SERVICE_NAME:latest -f Dockerfile . - else - docker build -t generic-py/$SERVICE_NAME:latest -f Dockerfile . - fi - ;; -run) - # Default to no port mapping - PORT_MAPPING="" - - # Parse run-specific options - while getopts "p:" opt; do - case $opt in - p) PORT_MAPPING="-p $OPTARG" ;; - ?) usage ;; - esac - done - - echo "Running docker container for $SERVICE_NAME..." - if [ -n "$PORT_MAPPING" ]; then - echo "Using port mapping: $PORT_MAPPING" - docker run -it --rm $PORT_MAPPING generic-py/$SERVICE_NAME:latest - else - docker run -it --rm generic-py/$SERVICE_NAME:latest - fi - ;; -push) - # Parse push-specific options - while getopts "SDLF" opt; do - case $opt in - S) SKIP_BRANCH_CHECK=1 ;; - D) ALLOW_DIRTY=1 ;; - L) TAG_AS_STAGING_LATEST=1 ;; - F) FORCE_LATEST=1 ;; - ?) usage ;; - esac - done - - # Validate required environment variables - echo $DO_TOKEN - if [[ -z "$DO_TOKEN" ]]; then - echo "Error: Missing required environment variables" - usage - fi - - # Always run platform validation - validate_platform - - # Always validate the branch, but -S flag will modify the validation behavior - validate_branch "$BRANCH_NAME" - - push "$BRANCH_NAME" - ;; -help | --help | -h) - usage - ;; -*) - echo "Error: Unknown command '$COMMAND'" - usage - ;; -esac diff --git a/py/bin/install.sh b/py/bin/install.sh deleted file mode 100755 index 3121bfe..0000000 --- a/py/bin/install.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -uv venv -uv python pin 3.12 -uv lock -uv sync --dev diff --git a/py/bin/migrate.sh b/py/bin/migrate.sh deleted file mode 100755 index 53565ec..0000000 --- a/py/bin/migrate.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# Source configuration -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "$SCRIPT_DIR/config.sh" - -# Parse arguments -DEV=false - -while [[ "$#" -gt 0 ]]; do - case $1 in - --dev) DEV=true ;; - *) - echo "Unknown parameter: $1" - exit 1 - ;; - esac - shift -done - -echo "Running migrations..." - -# Handle development environment -if [ "$DEV" = true ]; then - # Start PostgreSQL if needed - ./bin/postgres.sh run - export POSTGRES_URL=$(./bin/postgres.sh endpoint) - echo "DEV mode: Using POSTGRES_URL=$POSTGRES_URL" -fi - -# Check if POSTGRES_URL is set -if [ -z "$POSTGRES_URL" ]; then - echo "Error: POSTGRES_URL environment variable is not set" - echo "For development, run with --dev flag or set POSTGRES_URL" - exit 1 -fi - -# Run the migrations -uv run alembic upgrade head - -# Exit the script -exit 0 diff --git a/py/bin/prepare_migration.sh b/py/bin/prepare_migration.sh deleted file mode 100755 index 8113c41..0000000 --- a/py/bin/prepare_migration.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -set -e # Exit immediately if a command exits with a non-zero status. 
- -# Source configuration -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -source "$SCRIPT_DIR/config.sh" - -# check for the auto flag -MANUAL=false -if [ "$1" = "--manual" ]; then - MANUAL=true - shift # Remove --auto from arguments so $1 becomes the description -fi - -# Start PostgreSQL if needed (for development) -./bin/postgres.sh run -export POSTGRES_URL=$(./bin/postgres.sh endpoint) -echo "Using POSTGRES_URL: $POSTGRES_URL" - -# Get the migration description -DESCRIPTION=$1 -if [ -z "$DESCRIPTION" ]; then - echo "Error: Please provide a description for the migration." - exit 1 -fi - -# Generate alembic migrations -echo "Generating migration..." - -# Run alembic revision and capture output -if [ "$MANUAL" = true ]; then - uv run alembic revision -m "$DESCRIPTION" -else - uv run alembic revision --autogenerate -m "$DESCRIPTION" -fi - -exit 0 diff --git a/py/bin/run.sh b/py/bin/run.sh deleted file mode 100755 index 333cce7..0000000 --- a/py/bin/run.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -WORKER=false - -while [[ "$#" -gt 0 ]]; do - case $1 in - --worker) WORKER=true ;; - *) echo "Unknown parameter: $1"; exit 1 ;; - esac - shift -done - -export LISTEN_ADDRESS=0.0.0.0 -export LISTEN_PORT=8000 -export DEBUG=False - -if [ "$WORKER" = true ]; then - uv run arq src.task_manager.worker.WorkerSettings -else - uv run src/__main__.py -fi - -# Exit the script -exit 0 diff --git a/py/bin/styles.sh b/py/bin/styles.sh new file mode 100755 index 0000000..81fbd53 --- /dev/null +++ b/py/bin/styles.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Script to build styles with Tailwind CSS and manage branding assets + +set -o errexit +set -o nounset + +# Points back to the project root +export PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )" +# Source utils for logging +source "$PROJECT_ROOT/bin/utils" + +# Configuration +USE_SYMLINKS="${USE_SYMLINKS:-0}" +WATCH_MODE=false + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --watch|-w) + WATCH_MODE=true + shift + ;; + --symlinks|-s) + USE_SYMLINKS=1 + shift + ;; + --help|-h) + echo "Usage: $0 [options]" + echo "" + echo "Options:" + echo " -w, --watch Watch mode (rebuild on changes)" + echo " -s, --symlinks Use symlinks instead of copying assets" + echo " -h, --help Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +# Change to py directory +cd "$PROJECT_ROOT/py" + +# Function to sync branding assets +sync_assets() { + print_info "Syncing branding assets..." + mkdir -p static + + if [ "$USE_SYMLINKS" = "1" ]; then + print_info "Creating symlinks to branding assets..." + ln -sf ../../branding/assets/favicon.ico static/favicon.ico + ln -sf ../../branding/assets/favicon.png static/favicon.png + ln -sf ../../branding/assets/icon.png static/icon.png + ln -sf ../../branding/assets/icon.svg static/icon.svg + print_success "Assets symlinked" + else + print_info "Copying branding assets..." + cp ../branding/assets/favicon.ico static/favicon.ico + cp ../branding/assets/favicon.png static/favicon.png + cp ../branding/assets/icon.png static/icon.png + cp ../branding/assets/icon.svg static/icon.svg + print_success "Assets copied" + fi +} + +# Build CSS +build_css() { + print_info "Building Tailwind CSS..." 
+ if [ "$WATCH_MODE" = true ]; then + npx tailwindcss -i ./styles/main.css -o ./static/css/main.css --watch + else + npx tailwindcss -i ./styles/main.css -o ./static/css/main.css --minify + print_success "Styles built" + fi +} + +# Main execution +print_info "Building Python styles..." + +# Sync assets first +sync_assets + +# Build CSS (will watch if in watch mode) +build_css diff --git a/py/bin/tailwind.sh b/py/bin/tailwind.sh deleted file mode 100755 index c8a3fac..0000000 --- a/py/bin/tailwind.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -# Check if -w flag is set -# If it is, start the watcher -if [ "$1" == "-w" ]; then - npx tailwindcss -i styles/main.css -o static/css/main.css --watch -else - npx tailwindcss -i styles/main.css -o static/css/main.css -fi - -# Exit the script -exit 0 diff --git a/py/hcp.yaml b/py/hcp.yaml deleted file mode 100644 index e42a1cf..0000000 --- a/py/hcp.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# Vault configuration -vault: - apps: - # TODO (amiller68): this should be configurable - dev: generic-dev - # TODO (amiller68): add staging and production app names \ No newline at end of file diff --git a/py/src/config.py b/py/src/config.py index 29e93bb..20c9f50 100644 --- a/py/src/config.py +++ b/py/src/config.py @@ -58,8 +58,21 @@ def __init__(self): f"SERVICE_SECRET environment variable must be at least {MIN_SECRET_LENGTH} characters long", ) - self.google_client_id = empty_to_none("GOOGLE_CLIENT_ID") - self.google_client_secret = empty_to_none("GOOGLE_CLIENT_SECRET") + # google sso credentials + self.google_client_id = empty_to_none("GOOGLE_O_AUTH_CLIENT_ID") + self.google_client_secret = empty_to_none("GOOGLE_O_AUTH_CLIENT_SECRET") + + # throw if GOOGLE_O_AUTH_CLIENT_ID or GOOGLE_O_AUTH_CLIENT_SECRET is not set + if not self.google_client_id: + raise ConfigException( + ConfigExceptionType.missing_env_var, + "GOOGLE_O_AUTH_CLIENT_ID environment variable must be set", + ) + if not self.google_client_secret: + raise ConfigException( + ConfigExceptionType.missing_env_var, + "GOOGLE_O_AUTH_CLIENT_SECRET environment variable must be set", + ) # TODO: getopt() for cmd line arguments @@ -75,6 +88,9 @@ class Config: listen_port: int auth_redirect_uri: str + # marketing site URL (for linking back from login page) + marketing_site_url: str + # database # TODO (amiller68): ... what do we use this for? postgres_url: str @@ -88,7 +104,7 @@ class Config: secrets: Secrets def __str__(self): - return f"Config(dev_mode={self.dev_mode}, host_name={self.host_name}, listen_address={self.listen_address}, listen_port={self.listen_port}, auth_redirect_uri={self.auth_redirect_uri}, postgres_url={self.postgres_url}, postgres_async_url={self.postgres_async_url}, debug={self.debug}, log_path={self.log_path}, secrets={self.secrets})" + return f"Config(dev_mode={self.dev_mode}, host_name={self.host_name}, listen_address={self.listen_address}, listen_port={self.listen_port}, auth_redirect_uri={self.auth_redirect_uri}, marketing_site_url={self.marketing_site_url}, postgres_url={self.postgres_url}, postgres_async_url={self.postgres_async_url}, debug={self.debug}, log_path={self.log_path}, secrets={self.secrets})" def __init__(self): # Load the environment variables @@ -103,6 +119,11 @@ def __init__(self): "AUTH_REDIRECT_URI", f"{self.host_name}/auth/google/callback" ) + # Marketing site URL with safe default + self.marketing_site_url = os.getenv( + "MARKETING_SITE_URL", "http://localhost:3000" + ) + # TODO (amiller68): is this the correct way to handle postgres urls? # i.e. 
what if we're pooling? self.postgres_url = empty_to_none("POSTGRES_URL") diff --git a/py/src/server/__init__.py b/py/src/server/__init__.py index c241e35..dc73f48 100644 --- a/py/src/server/__init__.py +++ b/py/src/server/__init__.py @@ -61,11 +61,13 @@ async def http_exception_handler(request: Request, exc: HTTPException): print( f"Exception handler called: {exc.status_code} - {request.url.path}" ) # Debug - if request.url.path.startswith("/app"): - if exc.status_code in [ - status.HTTP_401_UNAUTHORIZED, - status.HTTP_403_FORBIDDEN, - ]: + # Redirect unauthorized users to login for protected pages + if exc.status_code in [ + status.HTTP_401_UNAUTHORIZED, + status.HTTP_403_FORBIDDEN, + ]: + # Don't redirect if already on login page + if not request.url.path.startswith("/app/login"): return RedirectResponse( url="/app/login", status_code=status.HTTP_302_FOUND ) diff --git a/py/src/server/auth/__init__.py b/py/src/server/auth/__init__.py index 4f7b9be..3fba946 100644 --- a/py/src/server/auth/__init__.py +++ b/py/src/server/auth/__init__.py @@ -35,6 +35,6 @@ async def google_callback(request: Request, state=Depends(app_state)): algorithm="HS256", ) - response = RedirectResponse(url="/app") + response = RedirectResponse(url="/app/dashboard") response.set_cookie(key="session", value=token, expires=expiration) return response diff --git a/py/static/css/main.css b/py/static/css/main.css index 3f68b8f..9d5762d 100644 --- a/py/static/css/main.css +++ b/py/static/css/main.css @@ -1,1388 +1 @@ -/* -! tailwindcss v3.4.0 | MIT License | https://tailwindcss.com -*/ - -/* -1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4) -2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116) -*/ - -*, -::before, -::after { - box-sizing: border-box; - /* 1 */ - border-width: 0; - /* 2 */ - border-style: solid; - /* 2 */ - border-color: #e5e7eb; - /* 2 */ -} - -::before, -::after { - --tw-content: ''; -} - -/* -1. Use a consistent sensible line-height in all browsers. -2. Prevent adjustments of font size after orientation changes in iOS. -3. Use a more readable tab size. -4. Use the user's configured `sans` font-family by default. -5. Use the user's configured `sans` font-feature-settings by default. -6. Use the user's configured `sans` font-variation-settings by default. -7. Disable tap highlights on iOS -*/ - -html, -:host { - line-height: 1.5; - /* 1 */ - -webkit-text-size-adjust: 100%; - /* 2 */ - -moz-tab-size: 4; - /* 3 */ - -o-tab-size: 4; - tab-size: 4; - /* 3 */ - font-family: system-ui, -apple-system, sans-serif; - /* 4 */ - font-feature-settings: normal; - /* 5 */ - font-variation-settings: normal; - /* 6 */ - -webkit-tap-highlight-color: transparent; - /* 7 */ -} - -/* -1. Remove the margin in all browsers. -2. Inherit line-height from `html` so users can set them as a class directly on the `html` element. -*/ - -body { - margin: 0; - /* 1 */ - line-height: inherit; - /* 2 */ -} - -/* -1. Add the correct height in Firefox. -2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655) -3. Ensure horizontal rules are visible by default. -*/ - -hr { - height: 0; - /* 1 */ - color: inherit; - /* 2 */ - border-top-width: 1px; - /* 3 */ -} - -/* -Add the correct text decoration in Chrome, Edge, and Safari. 
-*/ - -abbr:where([title]) { - -webkit-text-decoration: underline dotted; - text-decoration: underline dotted; -} - -/* -Remove the default font size and weight for headings. -*/ - -h1, -h2, -h3, -h4, -h5, -h6 { - font-size: inherit; - font-weight: inherit; -} - -/* -Reset links to optimize for opt-in styling instead of opt-out. -*/ - -a { - color: inherit; - text-decoration: inherit; -} - -/* -Add the correct font weight in Edge and Safari. -*/ - -b, -strong { - font-weight: bolder; -} - -/* -1. Use the user's configured `mono` font-family by default. -2. Use the user's configured `mono` font-feature-settings by default. -3. Use the user's configured `mono` font-variation-settings by default. -4. Correct the odd `em` font sizing in all browsers. -*/ - -code, -kbd, -samp, -pre { - font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; - /* 1 */ - font-feature-settings: normal; - /* 2 */ - font-variation-settings: normal; - /* 3 */ - font-size: 1em; - /* 4 */ -} - -/* -Add the correct font size in all browsers. -*/ - -small { - font-size: 80%; -} - -/* -Prevent `sub` and `sup` elements from affecting the line height in all browsers. -*/ - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sub { - bottom: -0.25em; -} - -sup { - top: -0.5em; -} - -/* -1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297) -2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016) -3. Remove gaps between table borders by default. -*/ - -table { - text-indent: 0; - /* 1 */ - border-color: inherit; - /* 2 */ - border-collapse: collapse; - /* 3 */ -} - -/* -1. Change the font styles in all browsers. -2. Remove the margin in Firefox and Safari. -3. Remove default padding in all browsers. -*/ - -button, -input, -optgroup, -select, -textarea { - font-family: inherit; - /* 1 */ - font-feature-settings: inherit; - /* 1 */ - font-variation-settings: inherit; - /* 1 */ - font-size: 100%; - /* 1 */ - font-weight: inherit; - /* 1 */ - line-height: inherit; - /* 1 */ - color: inherit; - /* 1 */ - margin: 0; - /* 2 */ - padding: 0; - /* 3 */ -} - -/* -Remove the inheritance of text transform in Edge and Firefox. -*/ - -button, -select { - text-transform: none; -} - -/* -1. Correct the inability to style clickable types in iOS and Safari. -2. Remove default button styles. -*/ - -button, -[type='button'], -[type='reset'], -[type='submit'] { - -webkit-appearance: button; - /* 1 */ - background-color: transparent; - /* 2 */ - background-image: none; - /* 2 */ -} - -/* -Use the modern Firefox focus style for all focusable elements. -*/ - -:-moz-focusring { - outline: auto; -} - -/* -Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737) -*/ - -:-moz-ui-invalid { - box-shadow: none; -} - -/* -Add the correct vertical alignment in Chrome and Firefox. -*/ - -progress { - vertical-align: baseline; -} - -/* -Correct the cursor style of increment and decrement buttons in Safari. -*/ - -::-webkit-inner-spin-button, -::-webkit-outer-spin-button { - height: auto; -} - -/* -1. Correct the odd appearance in Chrome and Safari. -2. Correct the outline style in Safari. 
-*/ - -[type='search'] { - -webkit-appearance: textfield; - /* 1 */ - outline-offset: -2px; - /* 2 */ -} - -/* -Remove the inner padding in Chrome and Safari on macOS. -*/ - -::-webkit-search-decoration { - -webkit-appearance: none; -} - -/* -1. Correct the inability to style clickable types in iOS and Safari. -2. Change font properties to `inherit` in Safari. -*/ - -::-webkit-file-upload-button { - -webkit-appearance: button; - /* 1 */ - font: inherit; - /* 2 */ -} - -/* -Add the correct display in Chrome and Safari. -*/ - -summary { - display: list-item; -} - -/* -Removes the default spacing and border for appropriate elements. -*/ - -blockquote, -dl, -dd, -h1, -h2, -h3, -h4, -h5, -h6, -hr, -figure, -p, -pre { - margin: 0; -} - -fieldset { - margin: 0; - padding: 0; -} - -legend { - padding: 0; -} - -ol, -ul, -menu { - list-style: none; - margin: 0; - padding: 0; -} - -/* -Reset default styling for dialogs. -*/ - -dialog { - padding: 0; -} - -/* -Prevent resizing textareas horizontally by default. -*/ - -textarea { - resize: vertical; -} - -/* -1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300) -2. Set the default placeholder color to the user's configured gray 400 color. -*/ - -input::-moz-placeholder, textarea::-moz-placeholder { - opacity: 1; - /* 1 */ - color: #9ca3af; - /* 2 */ -} - -input::placeholder, -textarea::placeholder { - opacity: 1; - /* 1 */ - color: #9ca3af; - /* 2 */ -} - -/* -Set the default cursor for buttons. -*/ - -button, -[role="button"] { - cursor: pointer; -} - -/* -Make sure disabled buttons don't get the pointer cursor. -*/ - -:disabled { - cursor: default; -} - -/* -1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14) -2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210) - This can trigger a poorly considered lint error in some tools but is included by design. -*/ - -img, -svg, -video, -canvas, -audio, -iframe, -embed, -object { - display: block; - /* 1 */ - vertical-align: middle; - /* 2 */ -} - -/* -Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. 
(https://github.com/mozdevs/cssremedy/issues/14) -*/ - -img, -video { - max-width: 100%; - height: auto; -} - -/* Make elements with the HTML hidden attribute stay hidden by default */ - -[hidden] { - display: none; -} - -/* Light mode defaults - BLACK AND WHITE TEST */ - -:root { - --background: 0 0% 100%; - /* Pure white */ - --foreground: 0 0% 0%; - /* Pure black */ - --muted: 0 0% 95%; - /* Very light grey */ - --muted-foreground: 0 0% 40%; - /* Dark grey */ - --card: 0 0% 100%; - /* White */ - --card-foreground: 0 0% 0%; - /* Black */ - --popover: 0 0% 100%; - /* White */ - --popover-foreground: 0 0% 0%; - /* Black */ - --border: 0 0% 85%; - /* Light grey border */ - --input: 0 0% 85%; - /* Light grey border */ - --primary: 0 0% 0%; - /* Black primary */ - --primary-foreground: 0 0% 100%; - /* White text on black */ - --secondary: 0 0% 90%; - /* Light grey */ - --secondary-foreground: 0 0% 0%; - /* Black text */ - --accent: 0 0% 70%; - /* Medium grey accent */ - --accent-foreground: 0 0% 0%; - /* Black text */ - --destructive: 0 0% 20%; - /* Dark grey for errors */ - --destructive-foreground: 0 0% 100%; - /* White text */ - --ring: 0 0% 50%; - /* Medium grey focus ring */ -} - -/* Dark mode - INVERTED BLACK AND WHITE */ - -.dark { - --background: 0 0% 0%; - /* Pure black */ - --foreground: 0 0% 100%; - /* Pure white */ - --muted: 0 0% 10%; - /* Very dark grey */ - --muted-foreground: 0 0% 60%; - /* Light grey */ - --card: 0 0% 0%; - /* Black */ - --card-foreground: 0 0% 100%; - /* White */ - --popover: 0 0% 0%; - /* Black */ - --popover-foreground: 0 0% 100%; - /* White */ - --border: 0 0% 20%; - /* Dark grey border */ - --input: 0 0% 20%; - /* Dark grey border */ - --primary: 0 0% 100%; - /* White primary */ - --primary-foreground: 0 0% 0%; - /* Black text on white */ - --secondary: 0 0% 15%; - /* Dark grey */ - --secondary-foreground: 0 0% 100%; - /* White text */ - --accent: 0 0% 30%; - /* Dark grey accent */ - --accent-foreground: 0 0% 100%; - /* White text */ - --destructive: 0 0% 80%; - /* Light grey for errors */ - --destructive-foreground: 0 0% 0%; - /* Black text */ - --ring: 0 0% 50%; - /* Medium grey focus ring */ -} - -*, ::before, ::after { - --tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - --tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-gradient-from-position: ; - --tw-gradient-via-position: ; - --tw-gradient-to-position: ; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - --tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgb(59 130 246 / 0.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - --tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: ; -} - -::backdrop { - --tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - 
--tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-gradient-from-position: ; - --tw-gradient-via-position: ; - --tw-gradient-to-position: ; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - --tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgb(59 130 246 / 0.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - --tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: ; -} - -.container { - width: 100%; -} - -@media (min-width: 640px) { - .container { - max-width: 640px; - } -} - -@media (min-width: 768px) { - .container { - max-width: 768px; - } -} - -@media (min-width: 1024px) { - .container { - max-width: 1024px; - } -} - -@media (min-width: 1280px) { - .container { - max-width: 1280px; - } -} - -@media (min-width: 1536px) { - .container { - max-width: 1536px; - } -} - -/* Simple, reusable components */ - -.input { - width: 100%; - border-radius: 0.375rem; - border-width: 1px; - border-color: hsl(var(--input)); - background-color: hsl(var(--background)); - padding-left: 0.75rem; - padding-right: 0.75rem; - padding-top: 0.5rem; - padding-bottom: 0.5rem; -} - -.input:focus { - border-color: hsl(var(--input)); - outline: 2px solid transparent; - outline-offset: 2px; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-ring-color: hsl(var(--ring)); -} - -.visible { - visibility: visible; -} - -.static { - position: static; -} - -.absolute { - position: absolute; -} - -.relative { - position: relative; -} - -.inset-0 { - inset: 0px; -} - -.bottom-0 { - bottom: 0px; -} - -.left-0 { - left: 0px; -} - -.left-1 { - left: 0.25rem; -} - -.right-0 { - right: 0px; -} - -.top-1 { - top: 0.25rem; -} - -.z-10 { - z-index: 10; -} - -.mx-2 { - margin-left: 0.5rem; - margin-right: 0.5rem; -} - -.mx-3 { - margin-left: 0.75rem; - margin-right: 0.75rem; -} - -.mx-auto { - margin-left: auto; - margin-right: auto; -} - -.my-1 { - margin-top: 0.25rem; - margin-bottom: 0.25rem; -} - -.my-2 { - margin-top: 0.5rem; - margin-bottom: 0.5rem; -} - -.mb-12 { - margin-bottom: 3rem; -} - -.mb-2 { - margin-bottom: 0.5rem; -} - -.mb-3 { - margin-bottom: 0.75rem; -} - -.mb-4 { - margin-bottom: 1rem; -} - -.mb-6 { - margin-bottom: 1.5rem; -} - -.mb-8 { - margin-bottom: 2rem; -} - -.mr-2 { - margin-right: 0.5rem; -} - -.mr-3 { - margin-right: 0.75rem; -} - -.mr-4 { - margin-right: 1rem; -} - -.mt-1 { - margin-top: 0.25rem; -} - -.mt-12 { - margin-top: 3rem; -} - -.mt-2 { - margin-top: 0.5rem; -} - -.mt-4 { - margin-top: 1rem; -} - -.mt-6 { - margin-top: 1.5rem; -} - -.mt-8 { - margin-top: 2rem; -} - -.block { - display: block; -} - -.flex { - display: flex; -} - -.grid { - display: grid; -} - -.hidden { - display: none; -} - 
-.size-8 { - width: 2rem; - height: 2rem; -} - -.h-10 { - height: 2.5rem; -} - -.h-12 { - height: 3rem; -} - -.h-4 { - height: 1rem; -} - -.h-5 { - height: 1.25rem; -} - -.h-8 { - height: 2rem; -} - -.h-full { - height: 100%; -} - -.min-h-screen { - min-height: 100vh; -} - -.w-20 { - width: 5rem; -} - -.w-4 { - width: 1rem; -} - -.w-48 { - width: 12rem; -} - -.w-5 { - width: 1.25rem; -} - -.w-64 { - width: 16rem; -} - -.w-8 { - width: 2rem; -} - -.w-full { - width: 100%; -} - -.max-w-2xl { - max-width: 42rem; -} - -.max-w-sm { - max-width: 24rem; -} - -.flex-1 { - flex: 1 1 0%; -} - -.flex-grow { - flex-grow: 1; -} - -.transform { - transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)); -} - -.cursor-pointer { - cursor: pointer; -} - -.grid-cols-1 { - grid-template-columns: repeat(1, minmax(0, 1fr)); -} - -.grid-cols-3 { - grid-template-columns: repeat(3, minmax(0, 1fr)); -} - -.flex-col { - flex-direction: column; -} - -.items-start { - align-items: flex-start; -} - -.items-center { - align-items: center; -} - -.items-stretch { - align-items: stretch; -} - -.justify-center { - justify-content: center; -} - -.gap-12 { - gap: 3rem; -} - -.gap-2 { - gap: 0.5rem; -} - -.gap-4 { - gap: 1rem; -} - -.gap-6 { - gap: 1.5rem; -} - -.gap-8 { - gap: 2rem; -} - -.space-x-4 > :not([hidden]) ~ :not([hidden]) { - --tw-space-x-reverse: 0; - margin-right: calc(1rem * var(--tw-space-x-reverse)); - margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse))); -} - -.space-y-1 > :not([hidden]) ~ :not([hidden]) { - --tw-space-y-reverse: 0; - margin-top: calc(0.25rem * calc(1 - var(--tw-space-y-reverse))); - margin-bottom: calc(0.25rem * var(--tw-space-y-reverse)); -} - -.space-y-1\.5 > :not([hidden]) ~ :not([hidden]) { - --tw-space-y-reverse: 0; - margin-top: calc(0.375rem * calc(1 - var(--tw-space-y-reverse))); - margin-bottom: calc(0.375rem * var(--tw-space-y-reverse)); -} - -.space-y-2 > :not([hidden]) ~ :not([hidden]) { - --tw-space-y-reverse: 0; - margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse))); - margin-bottom: calc(0.5rem * var(--tw-space-y-reverse)); -} - -.space-y-6 > :not([hidden]) ~ :not([hidden]) { - --tw-space-y-reverse: 0; - margin-top: calc(1.5rem * calc(1 - var(--tw-space-y-reverse))); - margin-bottom: calc(1.5rem * var(--tw-space-y-reverse)); -} - -.text-balance { - text-wrap: balance; -} - -.rounded-full { - border-radius: 9999px; -} - -.rounded-lg { - border-radius: 0.5rem; -} - -.border { - border-width: 1px; -} - -.border-r { - border-right-width: 1px; -} - -.border-t { - border-top-width: 1px; -} - -.border-background\/10 { - border-color: hsl(var(--background) / 0.1); -} - -.border-border { - border-color: hsl(var(--border)); -} - -.border-foreground\/20 { - border-color: hsl(var(--foreground) / 0.2); -} - -.bg-background { - background-color: hsl(var(--background)); -} - -.bg-foreground { - background-color: hsl(var(--foreground)); -} - -.bg-muted { - background-color: hsl(var(--muted)); -} - -.fill-blue-500\/20 { - fill: rgb(59 130 246 / 0.2); -} - -.fill-yellow-100 { - fill: #fef9c3; -} - -.fill-yellow-200 { - fill: #fef08a; -} - -.fill-yellow-400 { - fill: #facc15; -} - -.p-2 { - padding: 0.5rem; -} - -.p-4 { - padding: 1rem; -} - -.p-8 { - padding: 2rem; -} - -.px-2 { - padding-left: 0.5rem; - padding-right: 0.5rem; -} - -.px-4 { - padding-left: 1rem; - padding-right: 1rem; -} - -.px-6 { - padding-left: 1.5rem; - padding-right: 
1.5rem; -} - -.py-12 { - padding-top: 3rem; - padding-bottom: 3rem; -} - -.py-2 { - padding-top: 0.5rem; - padding-bottom: 0.5rem; -} - -.py-20 { - padding-top: 5rem; - padding-bottom: 5rem; -} - -.py-3 { - padding-top: 0.75rem; - padding-bottom: 0.75rem; -} - -.py-8 { - padding-top: 2rem; - padding-bottom: 2rem; -} - -.pt-8 { - padding-top: 2rem; -} - -.text-center { - text-align: center; -} - -.text-2xl { - font-size: 1.5rem; - line-height: 2rem; -} - -.text-3xl { - font-size: 1.875rem; - line-height: 2.25rem; -} - -.text-5xl { - font-size: 3rem; - line-height: 1; -} - -.text-lg { - font-size: 1.125rem; - line-height: 1.75rem; -} - -.text-sm { - font-size: 0.875rem; - line-height: 1.25rem; -} - -.text-xl { - font-size: 1.25rem; - line-height: 1.75rem; -} - -.font-black { - font-weight: 900; -} - -.font-bold { - font-weight: 700; -} - -.font-medium { - font-weight: 500; -} - -.font-semibold { - font-weight: 600; -} - -.leading-none { - line-height: 1; -} - -.text-background { - color: hsl(var(--background)); -} - -.text-blue-400 { - --tw-text-opacity: 1; - color: rgb(96 165 250 / var(--tw-text-opacity)); -} - -.text-foreground { - color: hsl(var(--foreground)); -} - -.text-muted-foreground { - color: hsl(var(--muted-foreground)); -} - -.text-primary { - color: hsl(var(--primary)); -} - -.text-yellow-100 { - --tw-text-opacity: 1; - color: rgb(254 249 195 / var(--tw-text-opacity)); -} - -.text-yellow-200 { - --tw-text-opacity: 1; - color: rgb(254 240 138 / var(--tw-text-opacity)); -} - -.text-yellow-500 { - --tw-text-opacity: 1; - color: rgb(234 179 8 / var(--tw-text-opacity)); -} - -.opacity-0 { - opacity: 0; -} - -.opacity-75 { - opacity: 0.75; -} - -.opacity-90 { - opacity: 0.9; -} - -.shadow-lg { - --tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); - --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -.shadow-md { - --tw-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); - --tw-shadow-colored: 0 4px 6px -1px var(--tw-shadow-color), 0 2px 4px -2px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -.shadow-sm { - --tw-shadow: 0 1px 2px 0 rgb(0 0 0 / 0.05); - --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -.drop-shadow { - --tw-drop-shadow: drop-shadow(0 1px 2px rgb(0 0 0 / 0.1)) drop-shadow(0 1px 1px rgb(0 0 0 / 0.06)); - filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow); -} - -.filter { - filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow); -} - -.transition-all { - transition-property: all; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - transition-duration: 150ms; -} - -.transition-colors { - transition-property: color, background-color, border-color, text-decoration-color, fill, stroke; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - transition-duration: 150ms; -} - -.transition-opacity { - transition-property: opacity; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - 
transition-duration: 150ms; -} - -.transition-transform { - transition-property: transform; - transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); - transition-duration: 150ms; -} - -.duration-200 { - transition-duration: 200ms; -} - -.duration-300 { - transition-duration: 300ms; -} - -/* Gradient text utility - GRAYSCALE TEST */ - -.gradient-text { - background-image: linear-gradient(to right, var(--tw-gradient-stops)); - --tw-gradient-from: #000 var(--tw-gradient-from-position); - --tw-gradient-to: rgb(0 0 0 / 0) var(--tw-gradient-to-position); - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #4b5563 var(--tw-gradient-to-position); - -webkit-background-clip: text; - background-clip: text; - color: transparent; -} - -.dark .gradient-text { - --tw-gradient-from: #fff var(--tw-gradient-from-position); - --tw-gradient-to: rgb(255 255 255 / 0) var(--tw-gradient-to-position); - --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); - --tw-gradient-to: #9ca3af var(--tw-gradient-to-position); -} - -.after\:absolute::after { - content: var(--tw-content); - position: absolute; -} - -.after\:inset-0::after { - content: var(--tw-content); - inset: 0px; -} - -.after\:top-1\/2::after { - content: var(--tw-content); - top: 50%; -} - -.after\:z-0::after { - content: var(--tw-content); - z-index: 0; -} - -.after\:flex::after { - content: var(--tw-content); - display: flex; -} - -.after\:items-center::after { - content: var(--tw-content); - align-items: center; -} - -.after\:border-t::after { - content: var(--tw-content); - border-top-width: 1px; -} - -.after\:border-border::after { - content: var(--tw-content); - border-color: hsl(var(--border)); -} - -.hover\:bg-foreground\/90:hover { - background-color: hsl(var(--foreground) / 0.9); -} - -.hover\:bg-muted:hover { - background-color: hsl(var(--muted)); -} - -.hover\:opacity-100:hover { - opacity: 1; -} - -.hover\:opacity-90:hover { - opacity: 0.9; -} - -.hover\:shadow-lg:hover { - --tw-shadow: 0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1); - --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color), 0 4px 6px -4px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -.hover\:shadow-xl:hover { - --tw-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1); - --tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -} - -@media (min-width: 768px) { - .md\:col-span-2 { - grid-column: span 2 / span 2; - } - - .md\:grid-cols-2 { - grid-template-columns: repeat(2, minmax(0, 1fr)); - } - - .md\:grid-cols-3 { - grid-template-columns: repeat(3, minmax(0, 1fr)); - } - - .md\:grid-cols-4 { - grid-template-columns: repeat(4, minmax(0, 1fr)); - } - - .md\:p-10 { - padding: 2.5rem; - } -} \ No newline at end of file +/*! 
tailwindcss v3.4.0 | MIT License | https://tailwindcss.com*/*,:after,:before{box-sizing:border-box;border:0 solid #e6e6e6}:after,:before{--tw-content:""}:host,html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:system-ui,-apple-system,sans-serif;font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:ui-monospace,monospace;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:initial}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button;background-color:initial;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:initial}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0}fieldset,legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#b3b3b3}input::placeholder,textarea::placeholder{opacity:1;color:#b3b3b3}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,::backdrop,:after,:before{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#3b82f680;--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.container{width:100%}@media (min-width:640px){.container{max-width:640px}}@media (min-width:768px){.container{max-width:768px}}@media (min-width:1024px){.container{max-width:1024px}}@media 
(min-width:1280px){.container{max-width:1280px}}@media (min-width:1536px){.container{max-width:1536px}}.visible{visibility:visible}.static{position:static}.absolute{position:absolute}.relative{position:relative}.inset-0{inset:0}.bottom-0{bottom:0}.left-0{left:0}.left-1{left:.25rem}.right-0{right:0}.top-1{top:.25rem}.z-10{z-index:10}.mx-2{margin-left:.5rem;margin-right:.5rem}.mx-3{margin-left:.75rem;margin-right:.75rem}.mx-auto{margin-left:auto;margin-right:auto}.my-1{margin-top:.25rem;margin-bottom:.25rem}.my-2{margin-top:.5rem;margin-bottom:.5rem}.mb-12{margin-bottom:3rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.mb-6{margin-bottom:1.5rem}.mb-8{margin-bottom:2rem}.mr-2{margin-right:.5rem}.mr-3{margin-right:.75rem}.mr-4{margin-right:1rem}.mt-1{margin-top:.25rem}.mt-12{margin-top:3rem}.mt-2{margin-top:.5rem}.mt-4{margin-top:1rem}.mt-6{margin-top:1.5rem}.mt-8{margin-top:2rem}.block{display:block}.flex{display:flex}.grid{display:grid}.hidden{display:none}.size-8{width:2rem;height:2rem}.h-10{height:2.5rem}.h-12{height:3rem}.h-4{height:1rem}.h-5{height:1.25rem}.h-8{height:2rem}.h-full{height:100%}.min-h-screen{min-height:100vh}.w-20{width:5rem}.w-4{width:1rem}.w-48{width:12rem}.w-5{width:1.25rem}.w-64{width:16rem}.w-8{width:2rem}.w-full{width:100%}.max-w-2xl{max-width:42rem}.max-w-sm{max-width:24rem}.flex-1{flex:1 1 0%}.flex-grow{flex-grow:1}.transform{transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.cursor-pointer{cursor:pointer}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.flex-col{flex-direction:column}.items-start{align-items:flex-start}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-center{justify-content:center}.gap-12{gap:3rem}.gap-2{gap:.5rem}.gap-4{gap:1rem}.gap-6{gap:1.5rem}.gap-8{gap:2rem}.space-x-4>:not([hidden])~:not([hidden]){--tw-space-x-reverse:0;margin-right:calc(1rem*var(--tw-space-x-reverse));margin-left:calc(1rem*(1 - var(--tw-space-x-reverse)))}.space-y-1>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.25rem*(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.25rem*var(--tw-space-y-reverse))}.space-y-1\.5>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.375rem*(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.375rem*var(--tw-space-y-reverse))}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(.5rem*(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.5rem*var(--tw-space-y-reverse))}.space-y-6>:not([hidden])~:not([hidden]){--tw-space-y-reverse:0;margin-top:calc(1.5rem*(1 - 
var(--tw-space-y-reverse)));margin-bottom:calc(1.5rem*var(--tw-space-y-reverse))}.text-balance{text-wrap:balance}.rounded-full{border-radius:9999px}.rounded-lg{border-radius:.5rem}.border{border-width:1px}.border-r{border-right-width:1px}.border-t{border-top-width:1px}.border-background\/10{border-color:hsl(var(--background)/.1)}.border-border{border-color:hsl(var(--border))}.border-foreground\/20{border-color:hsl(var(--foreground)/.2)}.bg-background{background-color:hsl(var(--background))}.bg-foreground{background-color:hsl(var(--foreground))}.bg-muted{background-color:hsl(var(--muted))}.fill-blue-500\/20{fill:#3b82f633}.fill-yellow-100{fill:#fef9c3}.fill-yellow-200{fill:#fef08a}.fill-yellow-400{fill:#facc15}.p-2{padding:.5rem}.p-4{padding:1rem}.p-8{padding:2rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-4{padding-left:1rem;padding-right:1rem}.px-6{padding-left:1.5rem;padding-right:1.5rem}.py-12{padding-top:3rem;padding-bottom:3rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-20{padding-top:5rem;padding-bottom:5rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.py-8{padding-bottom:2rem}.pt-8,.py-8{padding-top:2rem}.text-center{text-align:center}.text-2xl{font-size:1.5rem}.text-3xl{font-size:1.875rem}.text-5xl{font-size:3rem}.text-lg{font-size:1.125rem}.text-sm{font-size:.875rem}.text-xl{font-size:1.25rem}.font-black{font-weight:900}.font-bold{font-weight:700}.font-medium{font-weight:500}.font-semibold{font-weight:600}.leading-none{line-height:1}.text-background{color:hsl(var(--background))}.text-blue-400{--tw-text-opacity:1;color:rgb(96 165 250/var(--tw-text-opacity))}.text-foreground{color:hsl(var(--foreground))}.text-muted-foreground{color:hsl(var(--muted-foreground))}.text-primary{color:hsl(var(--primary))}.text-yellow-100{--tw-text-opacity:1;color:rgb(254 249 195/var(--tw-text-opacity))}.text-yellow-200{--tw-text-opacity:1;color:rgb(254 240 138/var(--tw-text-opacity))}.text-yellow-500{--tw-text-opacity:1;color:rgb(234 179 8/var(--tw-text-opacity))}.opacity-0{opacity:0}.opacity-75{opacity:.75}.opacity-90{opacity:.9}.shadow-lg{--tw-shadow:0 10px 15px -3px #0000001a,0 4px 6px -4px #0000001a;--tw-shadow-colored:0 10px 15px -3px var(--tw-shadow-color),0 4px 6px -4px var(--tw-shadow-color)}.shadow-lg,.shadow-md{box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px #0000001a,0 2px 4px -2px #0000001a;--tw-shadow-colored:0 4px 6px -1px var(--tw-shadow-color),0 2px 4px -2px var(--tw-shadow-color)}.shadow-sm{--tw-shadow:0 1px 2px 0 #0000000d;--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.drop-shadow{--tw-drop-shadow:drop-shadow(0 1px 2px #0000001a) drop-shadow(0 1px 1px #0000000f)}.drop-shadow,.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) 
var(--tw-drop-shadow)}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-colors{transition-property:color,background-color,border-color,text-decoration-color,fill,stroke;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-opacity{transition-property:opacity;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-transform{transition-property:transform;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.duration-200{transition-duration:.2s}.duration-300{transition-duration:.3s}.after\:absolute:after{content:var(--tw-content);position:absolute}.after\:inset-0:after{content:var(--tw-content);inset:0}.after\:top-1\/2:after{content:var(--tw-content);top:50%}.after\:z-0:after{content:var(--tw-content);z-index:0}.after\:flex:after{content:var(--tw-content);display:flex}.after\:items-center:after{content:var(--tw-content);align-items:center}.after\:border-t:after{content:var(--tw-content);border-top-width:1px}.after\:border-border:after{content:var(--tw-content);border-color:hsl(var(--border))}.hover\:bg-foreground\/90:hover{background-color:hsl(var(--foreground)/.9)}.hover\:bg-muted:hover{background-color:hsl(var(--muted))}.hover\:opacity-100:hover{opacity:1}.hover\:opacity-80:hover{opacity:.8}.hover\:opacity-90:hover{opacity:.9}.hover\:shadow-lg:hover{--tw-shadow:0 10px 15px -3px #0000001a,0 4px 6px -4px #0000001a;--tw-shadow-colored:0 10px 15px -3px var(--tw-shadow-color),0 4px 6px -4px var(--tw-shadow-color)}.hover\:shadow-lg:hover,.hover\:shadow-xl:hover{box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.hover\:shadow-xl:hover{--tw-shadow:0 20px 25px -5px #0000001a,0 8px 10px -6px #0000001a;--tw-shadow-colored:0 20px 25px -5px var(--tw-shadow-color),0 8px 10px -6px var(--tw-shadow-color)}@media (min-width:768px){.md\:col-span-2{grid-column:span 2/span 2}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.md\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}.md\:p-10{padding:2.5rem}} \ No newline at end of file diff --git a/py/styles/animations.css b/py/styles/animations.css index 8d13335..ba1f1bd 100644 --- a/py/styles/animations.css +++ b/py/styles/animations.css @@ -1,26 +1,127 @@ -/* Simplified animations - keeping only the cool ones */ +/* Shared animations that can be used across projects */ +/* Basic animations */ @keyframes fade-in { from { opacity: 0; } to { opacity: 1; } } @keyframes slide-up { - from { + from { opacity: 0; transform: translateY(10px); } - to { + to { opacity: 1; transform: translateY(0); } } +@keyframes slide-down { + from { + opacity: 0; + transform: translateY(-10px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +@keyframes slide-left { + from { + opacity: 0; + transform: translateX(10px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +@keyframes slide-right { + from { + opacity: 0; + transform: translateX(-10px); + } + to { + opacity: 1; + transform: translateX(0); + } +} + @keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } +@keyframes pulse { + 0%, 100% { + opacity: 1; + } + 50% { + opacity: 0.5; + } +} + +/* Advanced animations for dramatic effects */ +@keyframes blob { + 0%, 100% { + transform: translate(0, 0) scale(1); + } + 33% { + transform: translate(300px, -300px) scale(1.2); + } + 66% { + 
transform: translate(-200px, 200px) scale(0.8); + } +} + +@keyframes blob-reverse { + 0%, 100% { + transform: translate(0, 0) scale(1); + } + 33% { + transform: translate(-300px, 200px) scale(1.2); + } + 66% { + transform: translate(200px, -300px) scale(0.8); + } +} + +@keyframes float { + 0%, 100% { + transform: translateY(0px); + } + 50% { + transform: translateY(-200px); + } +} + +@keyframes drift { + 0%, 100% { + transform: translate(0, 0); + } + 25% { + transform: translate(400px, -200px); + } + 50% { + transform: translate(-300px, 300px); + } + 75% { + transform: translate(200px, 150px); + } +} + +@keyframes orbit { + from { + transform: rotate(0deg) translateX(250px) rotate(0deg); + } + to { + transform: rotate(360deg) translateX(250px) rotate(-360deg); + } +} + /* Utility classes */ .animate-fade-in { animation: fade-in 0.3s ease-out; @@ -30,11 +131,69 @@ animation: slide-up 0.4s ease-out; } +.animate-slide-down { + animation: slide-down 0.4s ease-out; +} + +.animate-slide-left { + animation: slide-left 0.4s ease-out; +} + +.animate-slide-right { + animation: slide-right 0.4s ease-out; +} + .animate-spin { animation: spin 1s linear infinite; } +.animate-pulse { + animation: pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite; +} + +/* Advanced animation utilities */ +.animate-blob { + animation: blob 15s infinite ease-in-out; +} + +.animate-blob-reverse { + animation: blob-reverse 18s infinite ease-in-out; +} + +.animate-float { + animation: float 12s ease-in-out infinite; +} + +.animate-drift { + animation: drift 20s ease-in-out infinite; +} + +.animate-orbit { + animation: orbit 25s linear infinite; +} + +/* Animation delays */ +.animation-delay-200 { + animation-delay: 200ms; +} + +.animation-delay-500 { + animation-delay: 500ms; +} + +.animation-delay-1000 { + animation-delay: 1s; +} + +.animation-delay-2000 { + animation-delay: 2s; +} + +.animation-delay-4000 { + animation-delay: 4s; +} + /* Loading states */ .loading { - @apply animate-spin rounded-full h-8 w-8 border-2 border-[var(--muted)] border-t-primary; + @apply animate-spin rounded-full h-8 w-8 border-2 border-[var(--muted)] border-t-[var(--primary)]; } \ No newline at end of file diff --git a/py/styles/main.css b/py/styles/main.css index d06ef84..f8f5dc6 100644 --- a/py/styles/main.css +++ b/py/styles/main.css @@ -1,108 +1,12 @@ @tailwind base; @tailwind components; @tailwind utilities; -@import 'animations.css'; -@layer base { - /* Light mode defaults - BLACK AND WHITE TEST */ - :root { - --background: 0 0% 100%; /* Pure white */ - --foreground: 0 0% 0%; /* Pure black */ - - --muted: 0 0% 95%; /* Very light grey */ - --muted-foreground: 0 0% 40%; /* Dark grey */ - - --card: 0 0% 100%; /* White */ - --card-foreground: 0 0% 0%; /* Black */ - - --popover: 0 0% 100%; /* White */ - --popover-foreground: 0 0% 0%; /* Black */ - - --border: 0 0% 85%; /* Light grey border */ - --input: 0 0% 85%; /* Light grey border */ - - --primary: 0 0% 0%; /* Black primary */ - --primary-foreground: 0 0% 100%; /* White text on black */ - - --secondary: 0 0% 90%; /* Light grey */ - --secondary-foreground: 0 0% 0%; /* Black text */ - - --accent: 0 0% 70%; /* Medium grey accent */ - --accent-foreground: 0 0% 0%; /* Black text */ - - --destructive: 0 0% 20%; /* Dark grey for errors */ - --destructive-foreground: 0 0% 100%; /* White text */ - - --ring: 0 0% 50%; /* Medium grey focus ring */ - } - - /* Dark mode - INVERTED BLACK AND WHITE */ - .dark { - --background: 0 0% 0%; /* Pure black */ - --foreground: 0 0% 100%; /* Pure white */ - - --muted: 0 
0% 10%; /* Very dark grey */ - --muted-foreground: 0 0% 60%; /* Light grey */ - - --card: 0 0% 0%; /* Black */ - --card-foreground: 0 0% 100%; /* White */ - - --popover: 0 0% 0%; /* Black */ - --popover-foreground: 0 0% 100%; /* White */ - - --border: 0 0% 20%; /* Dark grey border */ - --input: 0 0% 20%; /* Dark grey border */ - - --primary: 0 0% 100%; /* White primary */ - --primary-foreground: 0 0% 0%; /* Black text on white */ - - --secondary: 0 0% 15%; /* Dark grey */ - --secondary-foreground: 0 0% 100%; /* White text */ - - --accent: 0 0% 30%; /* Dark grey accent */ - --accent-foreground: 0 0% 100%; /* White text */ - - --destructive: 0 0% 80%; /* Light grey for errors */ - --destructive-foreground: 0 0% 0%; /* Black text */ - - --ring: 0 0% 50%; /* Medium grey focus ring */ - } -} +/* Import shared branding CSS variables and animations */ +@import '../../branding/styles/variables.css'; +@import '../../branding/styles/animations.css'; -@layer utilities { - /* Gradient text utility - GRAYSCALE TEST */ - .gradient-text { - @apply bg-gradient-to-r from-black to-gray-600 bg-clip-text text-transparent; - } - - .dark .gradient-text { - @apply from-white to-gray-400; - } -} +/* Import shared utility classes that use @apply */ +@import '../../branding/styles/utilities.css'; -@layer components { - /* Simple, reusable components */ - .card { - @apply bg-card border border-border rounded-lg p-6; - } - - .btn { - @apply px-4 py-2 rounded-md font-medium transition-colors; - } - - .btn-primary { - @apply bg-primary text-primary-foreground hover:bg-primary/90 transition-all; - } - - .btn-secondary { - @apply bg-secondary text-secondary-foreground hover:bg-secondary/80; - } - - .input { - @apply w-full px-3 py-2 bg-background border border-input rounded-md focus:outline-none focus:ring-2 focus:ring-ring focus:border-input; - } - - .text-muted { - @apply text-muted-foreground; - } -} \ No newline at end of file +/* Python-specific styles can be added below */ \ No newline at end of file diff --git a/py/tailwind.config.js b/py/tailwind.config.js index ac0c52f..3b9eb8c 100644 --- a/py/tailwind.config.js +++ b/py/tailwind.config.js @@ -1,52 +1,19 @@ /** @type {import('tailwindcss').Config} */ +// Auto-generated from branding/tailwind/preset.js +// DO NOT EDIT DIRECTLY - Edit branding/tailwind/preset.js instead + +const brandingPreset = require('../branding/tailwind/preset.js'); + module.exports = { content: [ "./templates/**/*.html", "./src/**/*.py", ], - darkMode: 'class', + presets: [brandingPreset], theme: { extend: { - colors: { - // Use CSS variables for theming - background: 'hsl(var(--background))', - foreground: 'hsl(var(--foreground))', - muted: { - DEFAULT: 'hsl(var(--muted))', - foreground: 'hsl(var(--muted-foreground))', - }, - card: { - DEFAULT: 'hsl(var(--card))', - foreground: 'hsl(var(--card-foreground))', - }, - popover: { - DEFAULT: 'hsl(var(--popover))', - foreground: 'hsl(var(--popover-foreground))', - }, - border: 'hsl(var(--border))', - input: 'hsl(var(--input))', - primary: { - DEFAULT: 'hsl(var(--primary))', - foreground: 'hsl(var(--primary-foreground))', - }, - secondary: { - DEFAULT: 'hsl(var(--secondary))', - foreground: 'hsl(var(--secondary-foreground))', - }, - accent: { - DEFAULT: 'hsl(var(--accent))', - foreground: 'hsl(var(--accent-foreground))', - }, - destructive: { - DEFAULT: 'hsl(var(--destructive))', - foreground: 'hsl(var(--destructive-foreground))', - }, - ring: 'hsl(var(--ring))', - }, - fontFamily: { - sans: ['system-ui', '-apple-system', 'sans-serif'], - }, + // 
Python-specific theme extensions can go here }, }, plugins: [], -} \ No newline at end of file +} diff --git a/py/templates/components/footer.html b/py/templates/components/footer.html index 1779324..f8bec76 100644 --- a/py/templates/components/footer.html +++ b/py/templates/components/footer.html @@ -1,29 +1,37 @@ -{# Footer Component #} -{% macro footer() %} +{# Footer Component #} {% macro footer() %}
-

generic

+

Generic

- A modern, full-stack Python application template with authentication, database integration, and a clean UI. + AI-powered tools for biochemistry research and discovery.

Quick Links

-
- © {{ current_year }} generic. All rights reserved. +
+ © {{ current_year }} Generic. All rights reserved.
-{% endmacro %} \ No newline at end of file +{% endmacro %} diff --git a/py/templates/layouts/app.html b/py/templates/layouts/app.html index a57be06..0c8ec2a 100644 --- a/py/templates/layouts/app.html +++ b/py/templates/layouts/app.html @@ -1,58 +1,70 @@ -{% extends "layouts/base.html" %} - -{% block body %} +{% extends "layouts/base.html" %} {% block body %}