From e386c1816eec4e26581fe02a4d86c84ae5bf74d4 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:02:11 +0800 Subject: [PATCH 01/23] feat(observability): add local otel collector and backend tracing --- .env.example | 2 + Makefile | 2 +- README.md | 6 ++ backend/.env.example | 10 ++ backend/cmd/api/main.go | 20 +++- backend/go.mod | 20 ++++ backend/go.sum | 42 ++++++++ backend/internal/config/config.go | 10 ++ backend/internal/telemetry/telemetry.go | 136 ++++++++++++++++++++++++ docker-compose.yml | 10 ++ docs/README.md | 2 + docs/operations/observability.md | 25 +++++ infra/otel/collector.yaml | 20 ++++ 13 files changed, 303 insertions(+), 2 deletions(-) create mode 100644 backend/internal/telemetry/telemetry.go create mode 100644 docs/operations/observability.md create mode 100644 infra/otel/collector.yaml diff --git a/.env.example b/.env.example index e81885c..c7a4e73 100644 --- a/.env.example +++ b/.env.example @@ -6,6 +6,8 @@ APP_VERSION=dev PORT=8080 DATABASE_URL=postgres://postgres:postgres@localhost:5432/saas_core_template?sslmode=disable REDIS_URL=redis://localhost:6379 +OTEL_TRACES_EXPORTER=console +OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 # Frontend NEXT_PUBLIC_API_URL=http://localhost:8080 diff --git a/Makefile b/Makefile index c93b2f5..e4c8eb2 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ SHELL := /bin/sh .PHONY: infra-up infra-down dev-api dev-ui test ci infra-up: - docker compose up -d postgres redis + docker compose up -d postgres redis otel-collector infra-down: docker compose down diff --git a/README.md b/README.md index 97d8f24..66e7a63 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,10 @@ Core variables: - `APP_BASE_URL` (frontend URL used for checkout return paths) - `APP_ENV` (`development` or `production`) - `APP_VERSION` (`dev`, commit SHA, or release tag) + - `OTEL_SERVICE_NAME` (default `saas-core-template-backend`) + - `OTEL_TRACES_EXPORTER` 
(`console`, `otlp`, or `none`) + - `OTEL_EXPORTER_OTLP_ENDPOINT` (local collector default `http://localhost:4318`) + - `OTEL_EXPORTER_OTLP_HEADERS` (for managed OTLP auth, e.g. Grafana Cloud) - `CLERK_SECRET_KEY` - `CLERK_API_URL` (default `https://api.clerk.com`) - `STRIPE_SECRET_KEY` @@ -70,6 +74,8 @@ Run infra first: make infra-up ``` +This starts Postgres, Redis, and a local OpenTelemetry collector (for local tracing). + Start backend in one terminal: ```bash diff --git a/backend/.env.example b/backend/.env.example index 5aa0f05..89affcb 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -4,6 +4,16 @@ PORT=8080 DATABASE_URL=postgres://postgres:postgres@localhost:5432/saas_core_template?sslmode=disable REDIS_URL=redis://localhost:6379 APP_BASE_URL=http://localhost:3000 + +# Observability (OpenTelemetry) +# - Local default: console spans in backend logs. +# - To use the local collector (docker compose service `otel-collector`): set OTEL_TRACES_EXPORTER=otlp. +OTEL_SERVICE_NAME=saas-core-template-backend +OTEL_TRACES_EXPORTER=console +OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 +# Example for Grafana Cloud OTLP auth: +# OTEL_EXPORTER_OTLP_HEADERS=Authorization=Basic +OTEL_EXPORTER_OTLP_HEADERS= CLERK_SECRET_KEY= CLERK_API_URL=https://api.clerk.com STRIPE_SECRET_KEY= diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 97798c6..6202639 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -11,12 +11,14 @@ import ( "syscall" "time" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "saas-core-template/backend/internal/api" "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" "saas-core-template/backend/internal/cache" "saas-core-template/backend/internal/config" "saas-core-template/backend/internal/db" + "saas-core-template/backend/internal/telemetry" ) const appName = "saas-core-template-api" @@ -30,6 +32,22 @@ func main() { ctx := context.Background() + 
shutdownTelemetry, err := telemetry.Init(ctx, telemetry.Config{ + ServiceName: cfg.ServiceName, + Environment: cfg.Env, + Version: cfg.Version, + TracesExporter: cfg.OtelTracesExporter, + OTLPEndpoint: cfg.OtelOTLPEndpoint, + OTLPHeaders: telemetry.ParseOTLPHeaders(cfg.OtelOTLPHeadersRaw), + }) + if err != nil { + slog.Error("failed to initialize telemetry", "error", err) + os.Exit(1) + } + defer func() { + _ = shutdownTelemetry(context.Background()) + }() + pool, err := db.Connect(ctx, cfg.DatabaseURL) if err != nil { slog.Error("failed to connect to postgres", "error", err) @@ -76,7 +94,7 @@ func main() { ) httpServer := &http.Server{ Addr: fmt.Sprintf(":%s", cfg.Port), - Handler: apiServer.Handler(), + Handler: otelhttp.NewHandler(apiServer.Handler(), "http"), ReadHeaderTimeout: 5 * time.Second, } diff --git a/backend/go.mod b/backend/go.mod index 41b7d7b..0dc4d08 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -8,12 +8,32 @@ require ( ) require ( + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/otel/sdk v1.26.0 // indirect + 
go.opentelemetry.io/otel/trace v1.26.0 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.33.0 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 7cd5d49..08bac2b 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -2,6 +2,8 @@ github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -9,6 +11,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= 
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -26,12 +37,43 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 h1:0W5o9SzoR15ocYHEQfvfipzcNog1lBxOLfnex91Hk6s= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0/go.mod h1:zVZ8nz+VSggWmnh6tTsJqXQ7rU4xLwRtna1M4x5jq58= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= +go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.21.0 
h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= +google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index cafd833..470cfe5 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -11,6 +11,11 @@ type Config struct { Port string DatabaseURL string RedisURL string + ServiceName string + + OtelTracesExporter string + OtelOTLPEndpoint string + OtelOTLPHeadersRaw string ClerkSecretKey string ClerkAPIURL string @@ -29,6 +34,11 @@ func Load() (Config, error) { Port: getEnv("PORT", "8080"), DatabaseURL: os.Getenv("DATABASE_URL"), RedisURL: os.Getenv("REDIS_URL"), + ServiceName: getEnv("OTEL_SERVICE_NAME", "saas-core-template-backend"), + + OtelTracesExporter: getEnv("OTEL_TRACES_EXPORTER", "console"), + 
OtelOTLPEndpoint: getEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318"), + OtelOTLPHeadersRaw: getEnv("OTEL_EXPORTER_OTLP_HEADERS", ""), ClerkSecretKey: os.Getenv("CLERK_SECRET_KEY"), ClerkAPIURL: getEnv("CLERK_API_URL", ""), diff --git a/backend/internal/telemetry/telemetry.go b/backend/internal/telemetry/telemetry.go new file mode 100644 index 0000000..230cd1d --- /dev/null +++ b/backend/internal/telemetry/telemetry.go @@ -0,0 +1,136 @@ +package telemetry + +import ( + "context" + "fmt" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" +) + +type Config struct { + ServiceName string + Environment string + Version string + + TracesExporter string // "console", "otlp", "none" + OTLPEndpoint string + OTLPHeaders map[string]string +} + +type ShutdownFunc func(context.Context) error + +func Init(ctx context.Context, cfg Config) (ShutdownFunc, error) { + serviceName := strings.TrimSpace(cfg.ServiceName) + if serviceName == "" { + serviceName = "backend" + } + + res, err := resource.Merge( + resource.Default(), + resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceName(serviceName), + semconv.ServiceVersion(strings.TrimSpace(cfg.Version)), + semconv.DeploymentEnvironment(strings.TrimSpace(cfg.Environment)), + ), + ) + if err != nil { + return nil, fmt.Errorf("build otel resource: %w", err) + } + + traceExporter, err := buildTraceExporter(ctx, cfg) + if err != nil { + return nil, err + } + + if traceExporter == nil { + otel.SetTracerProvider(sdktrace.NewTracerProvider(sdktrace.WithResource(res))) + otel.SetTextMapPropagator(propagation.TraceContext{}) + return func(context.Context) error { return nil }, nil + } + + tp := sdktrace.NewTracerProvider( + 
sdktrace.WithResource(res), + sdktrace.WithBatcher(traceExporter), + ) + + otel.SetTracerProvider(tp) + otel.SetTextMapPropagator(propagation.TraceContext{}) + + return tp.Shutdown, nil +} + +func buildTraceExporter(ctx context.Context, cfg Config) (sdktrace.SpanExporter, error) { + switch strings.ToLower(strings.TrimSpace(cfg.TracesExporter)) { + case "", "console": + exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) + if err != nil { + return nil, fmt.Errorf("create stdout trace exporter: %w", err) + } + return exp, nil + case "otlp": + endpoint := strings.TrimSpace(cfg.OTLPEndpoint) + if endpoint == "" { + endpoint = "http://localhost:4318" + } + + opts := []otlptracehttp.Option{} + switch { + case strings.HasPrefix(endpoint, "https://"): + opts = append(opts, otlptracehttp.WithEndpointURL(endpoint)) + case strings.HasPrefix(endpoint, "http://"): + opts = append(opts, otlptracehttp.WithEndpointURL(endpoint), otlptracehttp.WithInsecure()) + default: + opts = append(opts, otlptracehttp.WithEndpoint(endpoint), otlptracehttp.WithInsecure()) + } + + if len(cfg.OTLPHeaders) > 0 { + opts = append(opts, otlptracehttp.WithHeaders(cfg.OTLPHeaders)) + } + + exp, err := otlptracehttp.New(ctx, opts...) 
+ if err != nil { + return nil, fmt.Errorf("create otlp http trace exporter: %w", err) + } + return exp, nil + case "none", "noop", "disabled", "off": + return nil, nil + default: + return nil, fmt.Errorf("unknown OTEL_TRACES_EXPORTER %q (expected console|otlp|none)", cfg.TracesExporter) + } +} + +func ParseOTLPHeaders(raw string) map[string]string { + headers := map[string]string{} + for _, pair := range strings.Split(raw, ",") { + pair = strings.TrimSpace(pair) + if pair == "" { + continue + } + + key, value, ok := strings.Cut(pair, "=") + if !ok { + continue + } + + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + if key == "" || value == "" { + continue + } + + headers[key] = value + } + + if len(headers) == 0 { + return nil + } + return headers +} diff --git a/docker-compose.yml b/docker-compose.yml index 92f5ae2..1073dc1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -27,5 +27,15 @@ services: timeout: 3s retries: 10 + otel-collector: + image: otel/opentelemetry-collector-contrib:0.97.0 + container_name: saas-core-template-otel-collector + command: ["--config=/etc/otelcol/config.yaml"] + volumes: + - ./infra/otel/collector.yaml:/etc/otelcol/config.yaml:ro + ports: + - "4317:4317" # OTLP gRPC + - "4318:4318" # OTLP HTTP + volumes: pgdata: diff --git a/docs/README.md b/docs/README.md index 1e9220e..aed5313 100644 --- a/docs/README.md +++ b/docs/README.md @@ -26,6 +26,8 @@ This directory contains implementation playbooks for contributors and AI agents. - [SOC 2 Foundations](operations/compliance-soc2-foundations.md) - Baseline controls and evidence expectations. +- [Observability (OpenTelemetry)](operations/observability.md) + - Local tracing collector and production export configuration. - [Provider Migration Playbook](operations/provider-migration-playbook.md) - Dual-run, just-in-time migration, and cutover strategy. 
- [Agent Workflow Runbook](operations/agent-workflow.md) diff --git a/docs/operations/observability.md b/docs/operations/observability.md new file mode 100644 index 0000000..d0fbce8 --- /dev/null +++ b/docs/operations/observability.md @@ -0,0 +1,25 @@ +# Observability (OpenTelemetry) + +This template supports OpenTelemetry tracing in the Go backend. + +## Local development + +`make infra-up` starts a local OpenTelemetry Collector (`otel-collector`) in `docker-compose.yml` that accepts OTLP: + +- OTLP HTTP: `http://localhost:4318` +- OTLP gRPC: `localhost:4317` + +Local defaults: + +- Backend uses `OTEL_TRACES_EXPORTER=console` to print spans to stdout. +- To send traces to the local collector, set `OTEL_TRACES_EXPORTER=otlp`. + +## Production (Grafana Cloud) + +To export traces directly to Grafana Cloud (no collector required), configure: + +- `OTEL_TRACES_EXPORTER=otlp` +- `OTEL_EXPORTER_OTLP_ENDPOINT=` +- `OTEL_EXPORTER_OTLP_HEADERS=Authorization=Basic ` + +Keep provider-specific details (endpoints, auth) in env vars so swapping backends is configuration-only. 
diff --git a/infra/otel/collector.yaml b/infra/otel/collector.yaml new file mode 100644 index 0000000..da2c024 --- /dev/null +++ b/infra/otel/collector.yaml @@ -0,0 +1,20 @@ +receivers: + otlp: + protocols: + grpc: + http: + +exporters: + logging: + verbosity: detailed + +extensions: + health_check: + endpoint: 0.0.0.0:13133 + +service: + extensions: [health_check] + pipelines: + traces: + receivers: [otlp] + exporters: [logging] From d9d7dd76202fb5241744ec0ac68bf336f80c0833 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 16:41:50 +0800 Subject: [PATCH 02/23] feat(integrations): add analytics, support widget, and error reporting --- .env.example | 12 ++ AGENTS.md | 1 + README.md | 11 ++ backend/.env.example | 7 ++ backend/cmd/api/main.go | 22 +++- backend/go.mod | 1 + backend/go.sum | 2 + backend/internal/config/config.go | 8 ++ .../internal/errorreporting/errorreporting.go | 111 ++++++++++++++++++ backend/internal/errorreporting/http.go | 88 ++++++++++++++ docs/README.md | 6 + docs/operations/agent-workflow.md | 2 + docs/operations/error-reporting.md | 31 +++++ docs/operations/product-analytics.md | 22 ++++ docs/operations/support.md | 20 ++++ frontend/.env.example | 16 +++ frontend/app/app/dashboard-client.tsx | 6 + frontend/app/integrations-provider.tsx | 98 ++++++++++++++++ frontend/app/layout.tsx | 5 +- frontend/app/pricing/pricing-client.tsx | 3 + frontend/lib/integrations/analytics.ts | 63 ++++++++++ frontend/lib/integrations/crisp-loader.ts | 32 +++++ frontend/lib/integrations/error-reporting.ts | 87 ++++++++++++++ frontend/lib/integrations/posthog-loader.ts | 45 +++++++ frontend/lib/integrations/support.ts | 43 +++++++ 25 files changed, 740 insertions(+), 2 deletions(-) create mode 100644 backend/internal/errorreporting/errorreporting.go create mode 100644 backend/internal/errorreporting/http.go create mode 100644 docs/operations/error-reporting.md create mode 100644 
docs/operations/product-analytics.md create mode 100644 docs/operations/support.md create mode 100644 frontend/app/integrations-provider.tsx create mode 100644 frontend/lib/integrations/analytics.ts create mode 100644 frontend/lib/integrations/crisp-loader.ts create mode 100644 frontend/lib/integrations/error-reporting.ts create mode 100644 frontend/lib/integrations/posthog-loader.ts create mode 100644 frontend/lib/integrations/support.ts diff --git a/.env.example b/.env.example index c7a4e73..0a66272 100644 --- a/.env.example +++ b/.env.example @@ -8,7 +8,19 @@ DATABASE_URL=postgres://postgres:postgres@localhost:5432/saas_core_template?sslm REDIS_URL=redis://localhost:6379 OTEL_TRACES_EXPORTER=console OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 +ERROR_REPORTING_PROVIDER=console +SENTRY_DSN= +SENTRY_ENVIRONMENT=development # Frontend NEXT_PUBLIC_API_URL=http://localhost:8080 NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY= +NEXT_PUBLIC_ANALYTICS_PROVIDER=console +NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com +NEXT_PUBLIC_SUPPORT_PROVIDER=none +NEXT_PUBLIC_CRISP_WEBSITE_ID= +NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=console +NEXT_PUBLIC_SENTRY_DSN= +NEXT_PUBLIC_SENTRY_ENVIRONMENT=development +NEXT_PUBLIC_APP_VERSION=dev diff --git a/AGENTS.md b/AGENTS.md index 6227abd..0790dbe 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -55,6 +55,7 @@ This file defines non-negotiable engineering guardrails for humans and AI agents - Cross-cutting: - Re-run targeted searches for old identifiers and stale provider references. - Verify no secrets were added to tracked files. + - Confirm managed integrations remain optional and local E2E works with console/noop defaults (telemetry, analytics, error reporting, support). 
## Git and change hygiene diff --git a/README.md b/README.md index 66e7a63..75f01ac 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,9 @@ Core variables: - `OTEL_TRACES_EXPORTER` (`console`, `otlp`, or `none`) - `OTEL_EXPORTER_OTLP_ENDPOINT` (local collector default `http://localhost:4318`) - `OTEL_EXPORTER_OTLP_HEADERS` (for managed OTLP auth, e.g. Grafana Cloud) + - `ERROR_REPORTING_PROVIDER` (`console`, `sentry`, or `none`) + - `SENTRY_DSN` (backend error reporting) + - `SENTRY_ENVIRONMENT` (defaults to empty) - `CLERK_SECRET_KEY` - `CLERK_API_URL` (default `https://api.clerk.com`) - `STRIPE_SECRET_KEY` @@ -57,6 +60,14 @@ Core variables: - Frontend - `NEXT_PUBLIC_API_URL` (e.g. `http://localhost:8080`) - `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` + - `NEXT_PUBLIC_ANALYTICS_PROVIDER` (`console`, `posthog`, or `none`) + - `NEXT_PUBLIC_POSTHOG_KEY` + - `NEXT_PUBLIC_POSTHOG_HOST` + - `NEXT_PUBLIC_SUPPORT_PROVIDER` (`crisp` or `none`) + - `NEXT_PUBLIC_CRISP_WEBSITE_ID` + - `NEXT_PUBLIC_ERROR_REPORTING_PROVIDER` (`console`, `sentry`, or `none`) + - `NEXT_PUBLIC_SENTRY_DSN` + - `NEXT_PUBLIC_SENTRY_ENVIRONMENT` ## Database migrations diff --git a/backend/.env.example b/backend/.env.example index 89affcb..ce88e85 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -14,6 +14,13 @@ OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 # Example for Grafana Cloud OTLP auth: # OTEL_EXPORTER_OTLP_HEADERS=Authorization=Basic OTEL_EXPORTER_OTLP_HEADERS= + +# Error reporting (Sentry) +# - Local default: logs errors to console. +# - To enable Sentry: set ERROR_REPORTING_PROVIDER=sentry and SENTRY_DSN. 
+ERROR_REPORTING_PROVIDER=console +SENTRY_DSN= +SENTRY_ENVIRONMENT=development CLERK_SECRET_KEY= CLERK_API_URL=https://api.clerk.com STRIPE_SECRET_KEY= diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 6202639..ed3fe79 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -18,6 +18,7 @@ import ( "saas-core-template/backend/internal/cache" "saas-core-template/backend/internal/config" "saas-core-template/backend/internal/db" + "saas-core-template/backend/internal/errorreporting" "saas-core-template/backend/internal/telemetry" ) @@ -48,6 +49,20 @@ func main() { _ = shutdownTelemetry(context.Background()) }() + reporter, err := errorreporting.New(ctx, errorreporting.Config{ + Provider: cfg.ErrorReportingProvider, + DSN: cfg.SentryDSN, + Environment: cfg.SentryEnvironment, + Release: cfg.Version, + }) + if err != nil { + slog.Error("failed to initialize error reporting", "error", err) + os.Exit(1) + } + defer func() { + _ = reporter.Shutdown(context.Background()) + }() + pool, err := db.Connect(ctx, cfg.DatabaseURL) if err != nil { slog.Error("failed to connect to postgres", "error", err) @@ -92,9 +107,14 @@ func main() { api.WithBillingService(billingService), api.WithAppBaseURL(cfg.AppBaseURL), ) + + baseHandler := apiServer.Handler() + baseHandler = errorreporting.NewMiddleware(reporter).Wrap(baseHandler) + baseHandler = otelhttp.NewHandler(baseHandler, "http") + httpServer := &http.Server{ Addr: fmt.Sprintf(":%s", cfg.Port), - Handler: otelhttp.NewHandler(apiServer.Handler(), "http"), + Handler: baseHandler, ReadHeaderTimeout: 5 * time.Second, } diff --git a/backend/go.mod b/backend/go.mod index 0dc4d08..cd5105d 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -12,6 +12,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/getsentry/sentry-go v0.29.0 // indirect 
github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect diff --git a/backend/go.sum b/backend/go.sum index 08bac2b..a097a85 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -13,6 +13,8 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/getsentry/sentry-go v0.29.0 h1:YtWluuCFg9OfcqnaujpY918N/AhCCwarIDWOYSBAjCA= +github.com/getsentry/sentry-go v0.29.0/go.mod h1:jhPesDAL0Q0W2+2YEuVOvdWmVtdsr1+jtBrlDEVWwLY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 470cfe5..d6ac1b3 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -17,6 +17,10 @@ type Config struct { OtelOTLPEndpoint string OtelOTLPHeadersRaw string + ErrorReportingProvider string + SentryDSN string + SentryEnvironment string + ClerkSecretKey string ClerkAPIURL string StripeSecretKey string @@ -40,6 +44,10 @@ func Load() (Config, error) { OtelOTLPEndpoint: getEnv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4318"), OtelOTLPHeadersRaw: getEnv("OTEL_EXPORTER_OTLP_HEADERS", ""), + ErrorReportingProvider: getEnv("ERROR_REPORTING_PROVIDER", "console"), + SentryDSN: os.Getenv("SENTRY_DSN"), + SentryEnvironment: getEnv("SENTRY_ENVIRONMENT", ""), + ClerkSecretKey: os.Getenv("CLERK_SECRET_KEY"), ClerkAPIURL: getEnv("CLERK_API_URL", ""), StripeSecretKey: 
os.Getenv("STRIPE_SECRET_KEY"), diff --git a/backend/internal/errorreporting/errorreporting.go b/backend/internal/errorreporting/errorreporting.go new file mode 100644 index 0000000..20e442a --- /dev/null +++ b/backend/internal/errorreporting/errorreporting.go @@ -0,0 +1,111 @@ +package errorreporting + +import ( + "context" + "errors" + "fmt" + "log/slog" + "strings" + "time" + + "github.com/getsentry/sentry-go" +) + +type Reporter interface { + CaptureException(ctx context.Context, err error, attrs map[string]string) + Shutdown(ctx context.Context) error +} + +type Config struct { + Provider string // "console", "sentry", "none" + DSN string + Environment string + Release string +} + +func New(ctx context.Context, cfg Config) (Reporter, error) { + switch strings.ToLower(strings.TrimSpace(cfg.Provider)) { + case "", "console": + return &consoleReporter{}, nil + case "none", "noop", "disabled", "off": + return &noopReporter{}, nil + case "sentry": + dsn := strings.TrimSpace(cfg.DSN) + if dsn == "" { + return &consoleReporter{}, nil + } + + if err := sentry.Init(sentry.ClientOptions{ + Dsn: dsn, + Environment: strings.TrimSpace(cfg.Environment), + Release: strings.TrimSpace(cfg.Release), + AttachStacktrace: true, + }); err != nil { + return nil, fmt.Errorf("init sentry: %w", err) + } + + // Confirm SDK is ready by capturing a breadcrumb-style no-op on startup. 
+ sentry.ConfigureScope(func(scope *sentry.Scope) { + scope.SetTag("component", "backend") + }) + + return &sentryReporter{}, nil + default: + return nil, fmt.Errorf("unknown ERROR_REPORTING_PROVIDER %q (expected console|sentry|none)", cfg.Provider) + } +} + +type noopReporter struct{} + +func (r *noopReporter) CaptureException(context.Context, error, map[string]string) {} +func (r *noopReporter) Shutdown(context.Context) error { return nil } + +type consoleReporter struct{} + +func (r *consoleReporter) CaptureException(_ context.Context, err error, attrs map[string]string) { + if err == nil { + return + } + + fields := []any{"error", err} + for k, v := range attrs { + fields = append(fields, k, v) + } + slog.Error("captured exception", fields...) +} + +func (r *consoleReporter) Shutdown(context.Context) error { return nil } + +type sentryReporter struct{} + +func (r *sentryReporter) CaptureException(_ context.Context, err error, attrs map[string]string) { + if err == nil { + return + } + if errors.Is(err, context.Canceled) { + return + } + + sentry.WithScope(func(scope *sentry.Scope) { + for k, v := range attrs { + scope.SetTag(k, v) + } + sentry.CaptureException(err) + }) +} + +func (r *sentryReporter) Shutdown(ctx context.Context) error { + deadline, ok := ctx.Deadline() + if !ok { + sentry.Flush(2 * time.Second) + return nil + } + + remaining := time.Until(deadline) + if remaining <= 0 { + return nil + } + + sentry.Flush(remaining) + return nil +} diff --git a/backend/internal/errorreporting/http.go b/backend/internal/errorreporting/http.go new file mode 100644 index 0000000..4926c6f --- /dev/null +++ b/backend/internal/errorreporting/http.go @@ -0,0 +1,88 @@ +package errorreporting + +import ( + "context" + "fmt" + "net/http" + "runtime/debug" + "strings" +) + +type Middleware struct { + reporter Reporter +} + +func NewMiddleware(reporter Reporter) Middleware { + return Middleware{reporter: reporter} +} + +func (m Middleware) Wrap(next http.Handler) 
http.Handler { + if m.reporter == nil { + return next + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + sw := &statusWriter{ResponseWriter: w, status: http.StatusOK} + + defer func() { + if recovered := recover(); recovered != nil { + err := fmt.Errorf("panic: %v", recovered) + m.reporter.CaptureException(r.Context(), err, map[string]string{ + "http.method": r.Method, + "http.path": r.URL.Path, + "panic": "true", + }) + + // Don't leak panic details to clients. + http.Error(sw, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return + } + + if sw.status >= 500 { + m.reporter.CaptureException(r.Context(), fmt.Errorf("server error %d", sw.status), map[string]string{ + "http.method": r.Method, + "http.path": r.URL.Path, + "http.status": fmt.Sprintf("%d", sw.status), + }) + } + }() + + next.ServeHTTP(sw, r) + }) +} + +type statusWriter struct { + http.ResponseWriter + status int +} + +func (w *statusWriter) WriteHeader(code int) { + w.status = code + w.ResponseWriter.WriteHeader(code) +} + +func (w *statusWriter) Write(b []byte) (int, error) { + // If a handler never explicitly calls WriteHeader, net/http will write 200 on first write. + if w.status == 0 { + w.status = http.StatusOK + } + return w.ResponseWriter.Write(b) +} + +func WithStack(err error) error { + if err == nil { + return nil + } + stack := strings.TrimSpace(string(debug.Stack())) + if stack == "" { + return err + } + return fmt.Errorf("%w\n%s", err, stack) +} + +func Capture(ctx context.Context, reporter Reporter, err error, attrs map[string]string) { + if reporter == nil || err == nil { + return + } + reporter.CaptureException(ctx, err, attrs) +} diff --git a/docs/README.md b/docs/README.md index aed5313..a644602 100644 --- a/docs/README.md +++ b/docs/README.md @@ -28,6 +28,12 @@ This directory contains implementation playbooks for contributors and AI agents. - Baseline controls and evidence expectations. 
- [Observability (OpenTelemetry)](operations/observability.md) - Local tracing collector and production export configuration. +- [Product Analytics (PostHog)](operations/product-analytics.md) + - Local console analytics and managed PostHog configuration. +- [Error Reporting (Sentry)](operations/error-reporting.md) + - Local console error capture and managed Sentry configuration. +- [Support (Crisp)](operations/support.md) + - Optional support widget integration and provider swaps. - [Provider Migration Playbook](operations/provider-migration-playbook.md) - Dual-run, just-in-time migration, and cutover strategy. - [Agent Workflow Runbook](operations/agent-workflow.md) diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index 610aafb..6a167b9 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -37,6 +37,8 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - Configuration: - Validate env example files are still consistent and complete. - Validate deployment config changes reflect new variables (for example `render.yaml` for Render backend, and Vercel project env vars for frontend). + - Confirm managed integrations are optional and local E2E still works with console/noop defaults (OpenTelemetry, analytics, error reporting, support widget). + - Confirm no secrets were committed while adding integration variables. ## 5) Documentation and traceability diff --git a/docs/operations/error-reporting.md b/docs/operations/error-reporting.md new file mode 100644 index 0000000..facc329 --- /dev/null +++ b/docs/operations/error-reporting.md @@ -0,0 +1,31 @@ +# Error Reporting (Sentry) + +This template supports error reporting with local console defaults and managed provider opt-in. + +## Local development + +- Backend defaults to `ERROR_REPORTING_PROVIDER=console` and logs captured exceptions. 
+- Frontend defaults to `NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=console` and logs captured exceptions in the browser console. + +## Production (Sentry) + +### Backend (Go) + +Set: + +- `ERROR_REPORTING_PROVIDER=sentry` +- `SENTRY_DSN=` +- `SENTRY_ENVIRONMENT=production` + +### Frontend (Next.js) + +Set: + +- `NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=sentry` +- `NEXT_PUBLIC_SENTRY_DSN=` +- `NEXT_PUBLIC_SENTRY_ENVIRONMENT=production` +- `NEXT_PUBLIC_APP_VERSION=` (optional) + +## Switching providers + +Keep provider-specific SDK calls behind an internal adapter boundary. Only env vars should change when swapping providers. diff --git a/docs/operations/product-analytics.md b/docs/operations/product-analytics.md new file mode 100644 index 0000000..0411846 --- /dev/null +++ b/docs/operations/product-analytics.md @@ -0,0 +1,22 @@ +# Product Analytics (PostHog) + +This template supports product analytics via a provider boundary in the frontend. + +## Local development + +Defaults are safe for local end-to-end runs: + +- `NEXT_PUBLIC_ANALYTICS_PROVIDER=console` logs analytics calls in the browser console. +- Set `NEXT_PUBLIC_ANALYTICS_PROVIDER=none` to disable. + +## Production (PostHog Cloud) + +Configure the frontend env vars: + +- `NEXT_PUBLIC_ANALYTICS_PROVIDER=posthog` +- `NEXT_PUBLIC_POSTHOG_KEY=` +- `NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com` (or your PostHog instance URL) + +## Switching providers + +Analytics calls should use the internal client boundary (not provider SDKs directly). This keeps provider swaps localized to the integration adapter. diff --git a/docs/operations/support.md b/docs/operations/support.md new file mode 100644 index 0000000..5d93db0 --- /dev/null +++ b/docs/operations/support.md @@ -0,0 +1,20 @@ +# Support (Crisp) + +This template supports an optional support widget in the frontend. 
+ +## Local development + +Default is disabled: + +- `NEXT_PUBLIC_SUPPORT_PROVIDER=none` + +## Production (Crisp) + +Set: + +- `NEXT_PUBLIC_SUPPORT_PROVIDER=crisp` +- `NEXT_PUBLIC_CRISP_WEBSITE_ID=` + +## Switching providers + +Support widgets should be loaded and controlled through an internal client boundary so swaps are localized to the adapter. diff --git a/frontend/.env.example b/frontend/.env.example index 8e92eb7..5ed274a 100644 --- a/frontend/.env.example +++ b/frontend/.env.example @@ -1,2 +1,18 @@ NEXT_PUBLIC_API_URL=http://localhost:8080 NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY= + +# Integrations +# Analytics +NEXT_PUBLIC_ANALYTICS_PROVIDER=console +NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com + +# Support widget +NEXT_PUBLIC_SUPPORT_PROVIDER=none +NEXT_PUBLIC_CRISP_WEBSITE_ID= + +# Error reporting +NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=console +NEXT_PUBLIC_SENTRY_DSN= +NEXT_PUBLIC_SENTRY_ENVIRONMENT=development +NEXT_PUBLIC_APP_VERSION=dev diff --git a/frontend/app/app/dashboard-client.tsx b/frontend/app/app/dashboard-client.tsx index 6e285f2..f888850 100644 --- a/frontend/app/app/dashboard-client.tsx +++ b/frontend/app/app/dashboard-client.tsx @@ -3,6 +3,7 @@ import { UserButton, useAuth } from "@clerk/nextjs"; import { useEffect, useMemo, useState } from "react"; import { createBillingPortalSession, fetchViewer, type ViewerResponse } from "@/lib/api"; +import { createAnalyticsClient } from "@/lib/integrations/analytics"; type LoadState = "idle" | "loading" | "error"; @@ -11,6 +12,10 @@ export function DashboardClient() { const [viewer, setViewer] = useState(null); const [state, setState] = useState("idle"); const [portalLoading, setPortalLoading] = useState(false); + const analytics = useMemo( + () => createAnalyticsClient((process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? 
"console") as "console" | "posthog" | "none"), + [] + ); const hasClerk = useMemo(() => Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY), []); @@ -52,6 +57,7 @@ export function DashboardClient() { if (!hasClerk) { return; } + analytics.track("billing_portal_open_clicked"); setPortalLoading(true); const token = await getToken(); if (!token) { diff --git a/frontend/app/integrations-provider.tsx b/frontend/app/integrations-provider.tsx new file mode 100644 index 0000000..5aef687 --- /dev/null +++ b/frontend/app/integrations-provider.tsx @@ -0,0 +1,98 @@ +"use client"; + +import { useAuth } from "@clerk/nextjs"; +import { PropsWithChildren, useEffect, useMemo, useRef } from "react"; +import { createAnalyticsClient } from "@/lib/integrations/analytics"; +import { maybeLoadPostHog } from "@/lib/integrations/posthog-loader"; +import { maybeLoadCrisp } from "@/lib/integrations/crisp-loader"; +import { createSupportClient } from "@/lib/integrations/support"; +import { createErrorReportingClient } from "@/lib/integrations/error-reporting"; + +type IntegrationClients = { + analytics: ReturnType; + support: ReturnType; + errorReporting: ReturnType; +}; + +function useIntegrationsBase(): IntegrationClients { + const analyticsProvider = (process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? "console") as "console" | "posthog" | "none"; + const supportProvider = (process.env.NEXT_PUBLIC_SUPPORT_PROVIDER ?? "none") as "crisp" | "none"; + const errorReportingProvider = (process.env.NEXT_PUBLIC_ERROR_REPORTING_PROVIDER ?? 
"console") as "console" | "sentry" | "none"; + + const clients = useRef(null); + + const analytics = useMemo(() => createAnalyticsClient(analyticsProvider), [analyticsProvider]); + const support = useMemo(() => createSupportClient(supportProvider), [supportProvider]); + const errorReporting = useMemo(() => createErrorReportingClient(errorReportingProvider), [errorReportingProvider]); + + if ( + !clients.current || + clients.current.analytics !== analytics || + clients.current.support !== support || + clients.current.errorReporting !== errorReporting + ) { + clients.current = { + analytics, + support, + errorReporting + }; + } + + useEffect(() => { + maybeLoadPostHog({ + provider: analyticsProvider, + apiKey: process.env.NEXT_PUBLIC_POSTHOG_KEY, + host: process.env.NEXT_PUBLIC_POSTHOG_HOST + }); + + maybeLoadCrisp({ + provider: supportProvider, + websiteId: process.env.NEXT_PUBLIC_CRISP_WEBSITE_ID + }); + + clients.current?.errorReporting.init({ + dsn: process.env.NEXT_PUBLIC_SENTRY_DSN, + environment: process.env.NEXT_PUBLIC_SENTRY_ENVIRONMENT, + release: process.env.NEXT_PUBLIC_APP_VERSION + }); + }, [analyticsProvider, errorReportingProvider, supportProvider]); + + return clients.current; +} + +function IntegrationsWithClerk({ children }: PropsWithChildren) { + const { isLoaded, userId, orgId } = useAuth(); + const integrations = useIntegrationsBase(); + + useEffect(() => { + integrations.analytics.page(); + }, [integrations.analytics]); + + useEffect(() => { + if (!isLoaded || !userId) { + return; + } + + integrations.analytics.identify(userId); + integrations.analytics.group("organization", orgId ?? "none"); + integrations.support.identify({ userId, organizationId: orgId ?? 
undefined }); + integrations.errorReporting.setUser({ id: userId }); + }, [integrations, isLoaded, orgId, userId]); + + return <>{children}; +} + +export function AppIntegrationsProvider({ children }: PropsWithChildren) { + const hasClerk = Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY); + const integrations = useIntegrationsBase(); + + useEffect(() => { + integrations.analytics.page(); + }, [integrations.analytics]); + + if (!hasClerk) { + return <>{children}; + } + + return {children}; +} diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx index 3889a9c..14aaa3f 100644 --- a/frontend/app/layout.tsx +++ b/frontend/app/layout.tsx @@ -1,6 +1,7 @@ import type { Metadata } from "next"; import "./globals.css"; import { AppClerkProvider } from "./clerk-provider"; +import { AppIntegrationsProvider } from "./integrations-provider"; export const metadata: Metadata = { title: "SaaS Core Template", @@ -15,7 +16,9 @@ export default function RootLayout({ return ( - {children} + + {children} + ); diff --git a/frontend/app/pricing/pricing-client.tsx b/frontend/app/pricing/pricing-client.tsx index d2d1983..2bd483b 100644 --- a/frontend/app/pricing/pricing-client.tsx +++ b/frontend/app/pricing/pricing-client.tsx @@ -3,6 +3,7 @@ import { useAuth } from "@clerk/nextjs"; import { useState } from "react"; import { createCheckoutSession } from "@/lib/api"; +import { createAnalyticsClient } from "@/lib/integrations/analytics"; const PLANS = [ { @@ -25,12 +26,14 @@ export function PricingClient() { const { getToken, orgId } = useAuth(); const [loadingPlan, setLoadingPlan] = useState(null); const hasClerk = Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY); + const analytics = createAnalyticsClient((process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? 
"console") as "console" | "posthog" | "none"); const startCheckout = async (planCode: string) => { if (!hasClerk) { return; } + analytics.track("pricing_choose_plan_clicked", { planCode }); setLoadingPlan(planCode); const token = await getToken(); if (!token) { diff --git a/frontend/lib/integrations/analytics.ts b/frontend/lib/integrations/analytics.ts new file mode 100644 index 0000000..8ee5a84 --- /dev/null +++ b/frontend/lib/integrations/analytics.ts @@ -0,0 +1,63 @@ +export type AnalyticsProvider = "console" | "posthog" | "none"; + +export type AnalyticsClient = { + identify: (userId: string, props?: Record) => void; + group: (groupType: string, groupKey: string, props?: Record) => void; + track: (event: string, props?: Record) => void; + page: (path?: string) => void; +}; + +function log(method: string, event: string, props?: Record) { + const payload = props ? JSON.stringify(props) : ""; + // eslint-disable-next-line no-console + console.info(`[analytics:${method}] ${event}`, payload); +} + +export function createAnalyticsClient(provider: AnalyticsProvider): AnalyticsClient { + if (provider === "none") { + return { + identify: () => {}, + group: () => {}, + track: () => {}, + page: () => {} + }; + } + + if (provider === "console") { + return { + identify: (userId, props) => log("identify", userId, props), + group: (groupType, groupKey, props) => log("group", `${groupType}:${groupKey}`, props), + track: (event, props) => log("track", event, props), + page: (path) => log("page", path ?? window.location.pathname) + }; + } + + // posthog + return { + identify: (userId, props) => { + window.posthog?.identify?.(userId, props); + }, + group: (groupType, groupKey, props) => { + window.posthog?.group?.(groupType, groupKey, props); + }, + track: (event, props) => { + window.posthog?.capture?.(event, props); + }, + page: (path) => { + const url = path ?? 
window.location.pathname; + window.posthog?.capture?.("$pageview", { $current_url: url }); + } + }; +} + +declare global { + interface Window { + posthog?: { + init?: (key: string, options: { api_host: string; capture_pageview?: boolean }) => void; + identify?: (distinctId: string, props?: Record) => void; + group?: (groupType: string, groupKey: string, props?: Record) => void; + capture?: (event: string, props?: Record) => void; + }; + } +} + diff --git a/frontend/lib/integrations/crisp-loader.ts b/frontend/lib/integrations/crisp-loader.ts new file mode 100644 index 0000000..2f9086e --- /dev/null +++ b/frontend/lib/integrations/crisp-loader.ts @@ -0,0 +1,32 @@ +import type { SupportProvider } from "./support"; + +export type CrispConfig = { + provider: SupportProvider; + websiteId?: string; +}; + +export function maybeLoadCrisp(config: CrispConfig): void { + if (config.provider !== "crisp") { + return; + } + + const websiteId = config.websiteId?.trim(); + if (!websiteId) { + return; + } + + window.$crisp = window.$crisp ?? 
[]; + window.CRISP_WEBSITE_ID = websiteId; + + if (document.querySelector('script[data-crisp-loader="true"]')) { + return; + } + + const script = document.createElement("script"); + script.async = true; + script.defer = true; + script.dataset.crispLoader = "true"; + script.src = "https://client.crisp.chat/l.js"; + document.head.appendChild(script); +} + diff --git a/frontend/lib/integrations/error-reporting.ts b/frontend/lib/integrations/error-reporting.ts new file mode 100644 index 0000000..b0eef88 --- /dev/null +++ b/frontend/lib/integrations/error-reporting.ts @@ -0,0 +1,87 @@ +export type ErrorReportingProvider = "console" | "sentry" | "none"; + +export type ErrorReportingClient = { + init: (config: { dsn?: string; environment?: string; release?: string }) => void; + captureException: (error: unknown, context?: Record) => void; + setUser: (user: { id: string } | null) => void; +}; + +export function createErrorReportingClient(provider: ErrorReportingProvider): ErrorReportingClient { + if (provider === "none") { + return { + init: () => {}, + captureException: () => {}, + setUser: () => {} + }; + } + + if (provider === "console") { + return { + init: () => {}, + captureException: (error, context) => { + // eslint-disable-next-line no-console + console.error("[error-reporting]", error, context); + }, + setUser: (user) => { + // eslint-disable-next-line no-console + console.info("[error-reporting:user]", user); + } + }; + } + + // sentry (browser SDK via script loader) + return { + init: ({ dsn, environment, release }) => { + maybeLoadSentryBrowser(); + + const trimmed = dsn?.trim(); + if (!trimmed) { + return; + } + + window.Sentry?.init?.({ + dsn: trimmed, + environment: environment?.trim() || undefined, + release: release?.trim() || undefined + }); + }, + captureException: (error, context) => { + window.Sentry?.withScope?.((scope: any) => { + if (context) { + scope.setContext?.("extra", context); + } + window.Sentry?.captureException?.(error); + }); + }, + 
setUser: (user) => { + window.Sentry?.setUser?.(user); + } + }; +} + +function maybeLoadSentryBrowser() { + if (window.Sentry?.init) { + return; + } + + if (document.querySelector('script[data-sentry-loader="true"]')) { + return; + } + + const script = document.createElement("script"); + script.async = true; + script.defer = true; + script.dataset.sentryLoader = "true"; + script.crossOrigin = "anonymous"; + // Keep version pinned for deterministic builds; update intentionally. + script.src = "https://browser.sentry-cdn.com/7.120.0/bundle.tracing.min.js"; + + document.head.appendChild(script); +} + +declare global { + interface Window { + Sentry?: any; + } +} + diff --git a/frontend/lib/integrations/posthog-loader.ts b/frontend/lib/integrations/posthog-loader.ts new file mode 100644 index 0000000..4df4bb7 --- /dev/null +++ b/frontend/lib/integrations/posthog-loader.ts @@ -0,0 +1,45 @@ +import type { AnalyticsProvider } from "./analytics"; + +export type PostHogConfig = { + provider: AnalyticsProvider; + apiKey?: string; + host?: string; +}; + +export function maybeLoadPostHog(config: PostHogConfig): void { + if (config.provider !== "posthog") { + return; + } + + const key = config.apiKey?.trim(); + if (!key) { + return; + } + + const host = (config.host?.trim() || "https://app.posthog.com").replace(/\/+$/, ""); + + if (window.posthog?.init) { + window.posthog.init(key, { api_host: host, capture_pageview: false }); + return; + } + + if (document.querySelector('script[data-posthog-loader="true"]')) { + return; + } + + // Minimal loader: injects the official library and initializes it once loaded. + // Avoids adding an npm dependency so local E2E works without installs. + window.posthog = window.posthog ?? 
{}; + + const script = document.createElement("script"); + script.async = true; + script.defer = true; + script.dataset.posthogLoader = "true"; + script.src = `${host}/static/array.js`; + script.onload = () => { + window.posthog?.init?.(key, { api_host: host, capture_pageview: false }); + }; + + document.head.appendChild(script); +} + diff --git a/frontend/lib/integrations/support.ts b/frontend/lib/integrations/support.ts new file mode 100644 index 0000000..cfe620d --- /dev/null +++ b/frontend/lib/integrations/support.ts @@ -0,0 +1,43 @@ +export type SupportProvider = "crisp" | "none"; + +export type SupportClient = { + identify: (params: { userId?: string; email?: string; organizationId?: string }) => void; + open: () => void; +}; + +export function createSupportClient(provider: SupportProvider): SupportClient { + if (provider === "none") { + return { identify: () => {}, open: () => {} }; + } + + // crisp + return { + identify: ({ userId, email, organizationId }) => { + const crisp = window.$crisp; + if (!crisp) { + return; + } + + if (email) { + crisp.push(["set", "user:email", [email]]); + } + if (userId) { + crisp.push(["set", "session:data", [[["user_id", userId]]]]); + } + if (organizationId) { + crisp.push(["set", "session:data", [[["organization_id", organizationId]]]]); + } + }, + open: () => { + window.$crisp?.push?.(["do", "chat:open"]); + } + }; +} + +declare global { + interface Window { + $crisp?: any[]; + CRISP_WEBSITE_ID?: string; + } +} + From 89e9c593348f9464abcad437ebd3916d6a2469c2 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 17:26:40 +0800 Subject: [PATCH 03/23] feat: backend analytics + UI i18n + UI guide --- .env.example | 3 + README.md | 4 + backend/.env.example | 7 ++ backend/cmd/api/main.go | 18 +++++ backend/internal/analytics/analytics.go | 19 +++++ backend/internal/analytics/console.go | 28 +++++++ backend/internal/analytics/posthog.go | 89 +++++++++++++++++++++ 
backend/internal/api/router.go | 42 ++++++++++ backend/internal/config/config.go | 8 ++ docs/README.md | 5 ++ docs/frontend/ui-design-guide.md | 49 ++++++++++++ docs/operations/agent-workflow.md | 1 + docs/operations/product-analytics.md | 11 ++- frontend/.env.example | 4 + frontend/app/language-switcher.tsx | 40 ++++++++++ frontend/app/layout.tsx | 9 ++- frontend/app/page.tsx | 30 ++++--- frontend/app/pricing/page.tsx | 13 ++- frontend/lib/i18n/locale.ts | 15 ++++ frontend/lib/i18n/messages.ts | 100 ++++++++++++++++++++++++ frontend/lib/i18n/translate.ts | 29 +++++++ 21 files changed, 505 insertions(+), 19 deletions(-) create mode 100644 backend/internal/analytics/analytics.go create mode 100644 backend/internal/analytics/console.go create mode 100644 backend/internal/analytics/posthog.go create mode 100644 docs/frontend/ui-design-guide.md create mode 100644 frontend/app/language-switcher.tsx create mode 100644 frontend/lib/i18n/locale.ts create mode 100644 frontend/lib/i18n/messages.ts create mode 100644 frontend/lib/i18n/translate.ts diff --git a/.env.example b/.env.example index 0a66272..0e69ef6 100644 --- a/.env.example +++ b/.env.example @@ -11,6 +11,9 @@ OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 ERROR_REPORTING_PROVIDER=console SENTRY_DSN= SENTRY_ENVIRONMENT=development +ANALYTICS_PROVIDER=console +POSTHOG_PROJECT_KEY= +POSTHOG_HOST=https://app.posthog.com # Frontend NEXT_PUBLIC_API_URL=http://localhost:8080 diff --git a/README.md b/README.md index 75f01ac..16aa4a4 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,9 @@ Core variables: - `ERROR_REPORTING_PROVIDER` (`console`, `sentry`, or `none`) - `SENTRY_DSN` (backend error reporting) - `SENTRY_ENVIRONMENT` (defaults to empty) + - `ANALYTICS_PROVIDER` (`console`, `posthog`, or `none`) + - `POSTHOG_PROJECT_KEY` + - `POSTHOG_HOST` - `CLERK_SECRET_KEY` - `CLERK_API_URL` (default `https://api.clerk.com`) - `STRIPE_SECRET_KEY` @@ -68,6 +71,7 @@ Core variables: - `NEXT_PUBLIC_ERROR_REPORTING_PROVIDER` 
(`console`, `sentry`, or `none`) - `NEXT_PUBLIC_SENTRY_DSN` - `NEXT_PUBLIC_SENTRY_ENVIRONMENT` + - Locale is stored in a `locale` cookie (supported: `en`, `es`) ## Database migrations diff --git a/backend/.env.example b/backend/.env.example index ce88e85..5581101 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -21,6 +21,13 @@ OTEL_EXPORTER_OTLP_HEADERS= ERROR_REPORTING_PROVIDER=console SENTRY_DSN= SENTRY_ENVIRONMENT=development + +# Analytics (PostHog) +# - Local default: logs events to console. +# - To enable PostHog: set ANALYTICS_PROVIDER=posthog and POSTHOG_PROJECT_KEY. +ANALYTICS_PROVIDER=console +POSTHOG_PROJECT_KEY= +POSTHOG_HOST=https://app.posthog.com CLERK_SECRET_KEY= CLERK_API_URL=https://api.clerk.com STRIPE_SECRET_KEY= diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index ed3fe79..d8a0667 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -12,6 +12,7 @@ import ( "time" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "saas-core-template/backend/internal/analytics" "saas-core-template/backend/internal/api" "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" @@ -33,6 +34,22 @@ func main() { ctx := context.Background() + analyticsProvider, err := analytics.ProviderFromEnv(cfg.AnalyticsProvider) + if err != nil { + slog.Error("failed to parse analytics provider", "error", err) + os.Exit(1) + } + + var analyticsClient analytics.Client + switch analyticsProvider { + case "none": + analyticsClient = analytics.NewNoop() + case "posthog": + analyticsClient = analytics.NewPostHog(cfg.PostHogProjectKey, cfg.PostHogHost) + default: + analyticsClient = analytics.NewConsole() + } + shutdownTelemetry, err := telemetry.Init(ctx, telemetry.Config{ ServiceName: cfg.ServiceName, Environment: cfg.Env, @@ -106,6 +123,7 @@ func main() { api.WithAuthService(authService), api.WithBillingService(billingService), api.WithAppBaseURL(cfg.AppBaseURL), + 
api.WithAnalytics(analyticsClient), ) baseHandler := apiServer.Handler() diff --git a/backend/internal/analytics/analytics.go b/backend/internal/analytics/analytics.go new file mode 100644 index 0000000..7478976 --- /dev/null +++ b/backend/internal/analytics/analytics.go @@ -0,0 +1,19 @@ +package analytics + +import "context" + +type Event struct { + Name string + DistinctID string + Properties map[string]any +} + +type Client interface { + Track(ctx context.Context, event Event) +} + +type noopClient struct{} + +func NewNoop() Client { return &noopClient{} } + +func (c *noopClient) Track(context.Context, Event) {} diff --git a/backend/internal/analytics/console.go b/backend/internal/analytics/console.go new file mode 100644 index 0000000..8d2eb7a --- /dev/null +++ b/backend/internal/analytics/console.go @@ -0,0 +1,28 @@ +package analytics + +import ( + "context" + "encoding/json" + "log/slog" + "strings" +) + +type ConsoleClient struct{} + +func NewConsole() *ConsoleClient { return &ConsoleClient{} } + +func (c *ConsoleClient) Track(_ context.Context, event Event) { + name := strings.TrimSpace(event.Name) + if name == "" { + return + } + + props := "" + if len(event.Properties) > 0 { + if encoded, err := json.Marshal(event.Properties); err == nil { + props = string(encoded) + } + } + + slog.Info("analytics event", "name", name, "distinct_id", event.DistinctID, "properties", props) +} diff --git a/backend/internal/analytics/posthog.go b/backend/internal/analytics/posthog.go new file mode 100644 index 0000000..9102178 --- /dev/null +++ b/backend/internal/analytics/posthog.go @@ -0,0 +1,89 @@ +package analytics + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" + "time" +) + +type PostHogClient struct { + apiKey string + host string + client *http.Client +} + +func NewPostHog(apiKey string, host string) *PostHogClient { + key := strings.TrimSpace(apiKey) + base := strings.TrimRight(strings.TrimSpace(host), "/") + if base 
== "" { + base = "https://app.posthog.com" + } + + return &PostHogClient{ + apiKey: key, + host: base, + client: &http.Client{Timeout: 3 * time.Second}, + } +} + +func (c *PostHogClient) Track(ctx context.Context, event Event) { + if strings.TrimSpace(c.apiKey) == "" { + return + } + name := strings.TrimSpace(event.Name) + if name == "" { + return + } + + distinctID := strings.TrimSpace(event.DistinctID) + if distinctID == "" { + distinctID = "anonymous" + } + + body, err := json.Marshal(map[string]any{ + "api_key": c.apiKey, + "event": name, + "distinct_id": distinctID, + "properties": event.Properties, + }) + if err != nil { + slog.Debug("failed to encode posthog event", "error", err) + return + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.host+"/capture/", bytes.NewReader(body)) + if err != nil { + return + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.client.Do(req) + if err != nil { + slog.Debug("posthog track failed", "error", err) + return + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + slog.Debug("posthog track non-2xx", "status", resp.StatusCode, "event", name) + return + } +} + +func ProviderFromEnv(value string) (string, error) { + switch strings.ToLower(strings.TrimSpace(value)) { + case "", "console": + return "console", nil + case "posthog": + return "posthog", nil + case "none", "noop", "disabled", "off": + return "none", nil + default: + return "", fmt.Errorf("unknown ANALYTICS_PROVIDER %q (expected console|posthog|none)", value) + } +} diff --git a/backend/internal/api/router.go b/backend/internal/api/router.go index f47f6f9..e7cec28 100644 --- a/backend/internal/api/router.go +++ b/backend/internal/api/router.go @@ -11,6 +11,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/redis/go-redis/v9" + "saas-core-template/backend/internal/analytics" "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" ) @@ -24,12 
+25,14 @@ type Server struct { redis *redis.Client auth *auth.Service billing *billing.Service + analytics analytics.Client } type serverOptions struct { authService *auth.Service billingService *billing.Service appBaseURL string + analytics analytics.Client } func NewServer(appName string, env string, version string, db *pgxpool.Pool, redisClient *redis.Client, opts ...func(*serverOptions)) *Server { @@ -47,9 +50,17 @@ func NewServer(appName string, env string, version string, db *pgxpool.Pool, red redis: redisClient, auth: options.authService, billing: options.billingService, + analytics: defaultAnalytics(options.analytics), } } +func defaultAnalytics(client analytics.Client) analytics.Client { + if client == nil { + return analytics.NewNoop() + } + return client +} + func WithAuthService(authService *auth.Service) func(*serverOptions) { return func(opts *serverOptions) { opts.authService = authService @@ -68,6 +79,12 @@ func WithAppBaseURL(appBaseURL string) func(*serverOptions) { } } +func WithAnalytics(client analytics.Client) func(*serverOptions) { + return func(opts *serverOptions) { + opts.analytics = client + } +} + func (s *Server) Handler() http.Handler { mux := http.NewServeMux() mux.HandleFunc("GET /healthz", s.healthz) @@ -158,6 +175,12 @@ func (s *Server) requireAuth(next http.HandlerFunc) http.HandlerFunc { return } + s.analytics.Track(r.Context(), analytics.Event{ + Name: "auth_authenticated", + DistinctID: user.ID, + Properties: map[string]any{"provider": "clerk"}, + }) + ctx := context.WithValue(r.Context(), authUserContextKey, user) next.ServeHTTP(w, r.WithContext(ctx)) } @@ -209,6 +232,7 @@ func (s *Server) billingCheckoutSession(w http.ResponseWriter, r *http.Request) return } + user := authUserFromContext(r.Context()) org := authOrgFromContext(r.Context()) if org.ID == "" { writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) @@ -242,6 +266,15 @@ func (s *Server) billingCheckoutSession(w http.ResponseWriter, 
r *http.Request) return } + s.analytics.Track(r.Context(), analytics.Event{ + Name: "billing_checkout_session_created", + DistinctID: user.ID, + Properties: map[string]any{ + "organization_id": org.ID, + "plan_code": req.PlanCode, + }, + }) + writeJSON(w, http.StatusOK, map[string]string{"url": session.URL}) } @@ -251,6 +284,7 @@ func (s *Server) billingPortalSession(w http.ResponseWriter, r *http.Request) { return } + user := authUserFromContext(r.Context()) org := authOrgFromContext(r.Context()) if org.ID == "" { writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) @@ -272,6 +306,14 @@ func (s *Server) billingPortalSession(w http.ResponseWriter, r *http.Request) { return } + s.analytics.Track(r.Context(), analytics.Event{ + Name: "billing_portal_session_created", + DistinctID: user.ID, + Properties: map[string]any{ + "organization_id": org.ID, + }, + }) + writeJSON(w, http.StatusOK, map[string]string{"url": session.URL}) } diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index d6ac1b3..e305413 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -21,6 +21,10 @@ type Config struct { SentryDSN string SentryEnvironment string + AnalyticsProvider string + PostHogProjectKey string + PostHogHost string + ClerkSecretKey string ClerkAPIURL string StripeSecretKey string @@ -48,6 +52,10 @@ func Load() (Config, error) { SentryDSN: os.Getenv("SENTRY_DSN"), SentryEnvironment: getEnv("SENTRY_ENVIRONMENT", ""), + AnalyticsProvider: getEnv("ANALYTICS_PROVIDER", "console"), + PostHogProjectKey: os.Getenv("POSTHOG_PROJECT_KEY"), + PostHogHost: getEnv("POSTHOG_HOST", "https://app.posthog.com"), + ClerkSecretKey: os.Getenv("CLERK_SECRET_KEY"), ClerkAPIURL: getEnv("CLERK_API_URL", ""), StripeSecretKey: os.Getenv("STRIPE_SECRET_KEY"), diff --git a/docs/README.md b/docs/README.md index a644602..a9588a0 100644 --- a/docs/README.md +++ b/docs/README.md @@ -66,3 +66,8 @@ Before 
opening a PR: - Confirm provider SDK usage is isolated behind interfaces/adapters. - Confirm logs do not include secrets or sensitive payloads. - Add/update docs for any auth, tenancy, billing, or compliance-sensitive change. + +## Frontend guides + +- [UI Design Guide](frontend/ui-design-guide.md) + - Tailwind + Radix component conventions and UI principles. diff --git a/docs/frontend/ui-design-guide.md b/docs/frontend/ui-design-guide.md new file mode 100644 index 0000000..0c3bcc7 --- /dev/null +++ b/docs/frontend/ui-design-guide.md @@ -0,0 +1,49 @@ +# UI Design Guide (Tailwind + Radix) + +This template keeps UI styling intentionally simple, but the recommended direction for “production SaaS UI” is: + +- Tailwind CSS for styling and design tokens. +- Radix UI primitives for accessibility-correct components. +- A small component system using variants (for example `Button` with `intent`/`size`) to avoid one-off styles. + +## Principles + +- Accessibility is non-negotiable: keyboard support, focus rings, aria labels. +- Consistency beats perfection: prefer reusing a small set of primitives. +- Composition over abstraction: wrap Radix primitives with thin styling, don’t hide behavior. +- Design tokens, not ad-hoc colors: define a palette and use semantic tokens. + +## Component conventions + +- Put primitives in `frontend/components/ui/*`. +- Use a variant helper (e.g. `class-variance-authority`) to keep Tailwind class logic centralized. +- Always support: + - `disabled` state + - `loading` state (with `aria-busy`) + - focus-visible ring + - consistent spacing and typography + +## Recommended primitives + +- `Button`, `Input`, `Textarea`, `Select`, `Badge` +- `Dialog`, `Popover`, `DropdownMenu`, `Tooltip` +- `Toast` / `Toaster` + +## Layout guidance + +- Top-level pages should have one primary CTA. 
+- Marketing pages: + - hero + 3–6 feature bullets + - social proof + - pricing + - FAQ +- App pages: + - left nav or top nav + - consistent page header with title + actions + - empty states for first-run UX + +## i18n-friendly UI + +- Do not embed strings in deeply nested components; pass copy in from the page/screen layer. +- Avoid concatenating translated strings; prefer full sentences in message catalogs. + diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index 6a167b9..00edc50 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -39,6 +39,7 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - Validate deployment config changes reflect new variables (for example `render.yaml` for Render backend, and Vercel project env vars for frontend). - Confirm managed integrations are optional and local E2E still works with console/noop defaults (OpenTelemetry, analytics, error reporting, support widget). - Confirm no secrets were committed while adding integration variables. + - Confirm i18n defaults render correctly (language switcher changes locale without breaking SSR pages). ## 5) Documentation and traceability diff --git a/docs/operations/product-analytics.md b/docs/operations/product-analytics.md index 0411846..9f83dbc 100644 --- a/docs/operations/product-analytics.md +++ b/docs/operations/product-analytics.md @@ -1,6 +1,6 @@ -# Product Analytics (PostHog) +# Analytics (PostHog) -This template supports product analytics via a provider boundary in the frontend. +This template supports analytics via provider boundaries in both the frontend and backend. ## Local development @@ -8,6 +8,7 @@ Defaults are safe for local end-to-end runs: - `NEXT_PUBLIC_ANALYTICS_PROVIDER=console` logs analytics calls in the browser console. - Set `NEXT_PUBLIC_ANALYTICS_PROVIDER=none` to disable. +- Backend defaults to `ANALYTICS_PROVIDER=console` and logs events via structured logs. 
## Production (PostHog Cloud) @@ -17,6 +18,12 @@ Configure the frontend env vars: - `NEXT_PUBLIC_POSTHOG_KEY=` - `NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com` (or your PostHog instance URL) +Configure the backend env vars: + +- `ANALYTICS_PROVIDER=posthog` +- `POSTHOG_PROJECT_KEY=` +- `POSTHOG_HOST=https://app.posthog.com` (or your PostHog instance URL) + ## Switching providers Analytics calls should use the internal client boundary (not provider SDKs directly). This keeps provider swaps localized to the integration adapter. diff --git a/frontend/.env.example b/frontend/.env.example index 5ed274a..b742670 100644 --- a/frontend/.env.example +++ b/frontend/.env.example @@ -16,3 +16,7 @@ NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=console NEXT_PUBLIC_SENTRY_DSN= NEXT_PUBLIC_SENTRY_ENVIRONMENT=development NEXT_PUBLIC_APP_VERSION=dev + +# i18n +# Stored in a cookie set by the language switcher. +# Supported values: en, es diff --git a/frontend/app/language-switcher.tsx b/frontend/app/language-switcher.tsx new file mode 100644 index 0000000..2fdfb12 --- /dev/null +++ b/frontend/app/language-switcher.tsx @@ -0,0 +1,40 @@ +"use client"; + +import { useRouter } from "next/navigation"; +import { useMemo } from "react"; +import { LOCALES, type Locale } from "@/lib/i18n/messages"; +import { localeCookieName } from "@/lib/i18n/locale"; + +export function LanguageSwitcher({ currentLocale }: { currentLocale: Locale }) { + const router = useRouter(); + + const options = useMemo( + () => + LOCALES.map((locale) => ({ + value: locale, + label: locale.toUpperCase() + })), + [] + ); + + return ( + + ); +} + diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx index 14aaa3f..e53f05e 100644 --- a/frontend/app/layout.tsx +++ b/frontend/app/layout.tsx @@ -2,6 +2,8 @@ import type { Metadata } from "next"; import "./globals.css"; import { AppClerkProvider } from "./clerk-provider"; import { AppIntegrationsProvider } from "./integrations-provider"; +import { getServerLocale } from 
"@/lib/i18n/locale"; +import { LanguageSwitcher } from "./language-switcher"; export const metadata: Metadata = { title: "SaaS Core Template", @@ -13,9 +15,14 @@ export default function RootLayout({ }: Readonly<{ children: React.ReactNode; }>) { + const locale = getServerLocale(); + return ( - + +
+ +
{children} diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx index 8f60037..dc930fe 100644 --- a/frontend/app/page.tsx +++ b/frontend/app/page.tsx @@ -1,29 +1,34 @@ import Link from "next/link"; import { fetchMeta } from "@/lib/api"; +import { getServerLocale } from "@/lib/i18n/locale"; +import { getMessages } from "@/lib/i18n/messages"; +import { t } from "@/lib/i18n/translate"; export default async function HomePage() { const meta = await fetchMeta(); const hasClerk = Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY); + const messages = getMessages(getServerLocale()); return (
-

SaaS Core Template

-

Launch a production-shaped SaaS baseline with auth, multi-tenant workspaces, and billing foundations.

+

{t(messages, "home.title")}

+

{t(messages, "home.subtitle")}

-

What you get

+

{t(messages, "home.whatYouGetTitle")}

    -
  • Landing + pricing pages with clear upgrade paths
  • -
  • Protected app area and organization-aware APIs
  • -
  • Managed auth and billing integrations that stay migration-friendly
  • + {messages.home.whatYouGetBullets.map((bullet) => ( +
  • {bullet}
  • + ))}

- See pricing or continue to the app dashboard. + {t(messages, "home.whatYouGetCtaPrefix")} {t(messages, "home.whatYouGetCtaPricing")}{" "} + {t(messages, "home.whatYouGetCtaOr")} {t(messages, "home.whatYouGetCtaDashboard")}.

-

Platform Status

+

{t(messages, "home.statusTitle")}

{meta ? (
  • app: {meta.app}
  • @@ -32,19 +37,20 @@ export default async function HomePage() {
  • time: {new Date(meta.time).toLocaleString()}
) : ( -

Backend is unreachable. Start API on port 8080.

+

{t(messages, "home.backendUnreachable")}

)}
-

Get started

+

{t(messages, "home.getStartedTitle")}

{hasClerk ? (

- Use sign up to create your account or sign in if you already have one. + {t(messages, "home.getStartedWithClerk")}{" "} + sign up / sign in

) : (

- Clerk is not configured yet. Add publishable and secret keys, then use /app as your protected dashboard. + {t(messages, "home.getStartedWithoutClerk")} /app

)}
diff --git a/frontend/app/pricing/page.tsx b/frontend/app/pricing/page.tsx index effc049..b70c9cd 100644 --- a/frontend/app/pricing/page.tsx +++ b/frontend/app/pricing/page.tsx @@ -1,16 +1,21 @@ import Link from "next/link"; import { PricingClient } from "./pricing-client"; +import { getServerLocale } from "@/lib/i18n/locale"; +import { getMessages } from "@/lib/i18n/messages"; +import { t } from "@/lib/i18n/translate"; export default function PricingPage() { + const messages = getMessages(getServerLocale()); + return (
-

Pricing

-

Simple plans designed to get from zero to production without custom billing plumbing.

+

{t(messages, "pricing.title")}

+

{t(messages, "pricing.subtitle")}

-

Need help choosing?

+

{t(messages, "pricing.helpTitle")}

- Start with Pro and upgrade anytime. If you are already onboarded, head to the dashboard. + {t(messages, "pricing.helpBody")} dashboard.

diff --git a/frontend/lib/i18n/locale.ts b/frontend/lib/i18n/locale.ts new file mode 100644 index 0000000..37e2e3a --- /dev/null +++ b/frontend/lib/i18n/locale.ts @@ -0,0 +1,15 @@ +import { cookies } from "next/headers"; +import { isLocale, type Locale } from "./messages"; + +const COOKIE_NAME = "locale"; + +export function getServerLocale(): Locale { + const value = cookies().get(COOKIE_NAME)?.value; + if (isLocale(value)) { + return value; + } + return "en"; +} + +export const localeCookieName = COOKIE_NAME; + diff --git a/frontend/lib/i18n/messages.ts b/frontend/lib/i18n/messages.ts new file mode 100644 index 0000000..bc985a0 --- /dev/null +++ b/frontend/lib/i18n/messages.ts @@ -0,0 +1,100 @@ +export const LOCALES = ["en", "es"] as const; +export type Locale = (typeof LOCALES)[number]; + +export type Messages = { + home: { + title: string; + subtitle: string; + whatYouGetTitle: string; + whatYouGetBullets: [string, string, string]; + whatYouGetCtaPrefix: string; + whatYouGetCtaPricing: string; + whatYouGetCtaOr: string; + whatYouGetCtaDashboard: string; + statusTitle: string; + backendUnreachable: string; + getStartedTitle: string; + getStartedWithClerk: string; + getStartedWithoutClerk: string; + }; + pricing: { + title: string; + subtitle: string; + helpTitle: string; + helpBody: string; + }; + common: { + language: string; + }; +}; + +const EN: Messages = { + home: { + title: "SaaS Core Template", + subtitle: "Launch a production-shaped SaaS baseline with auth, multi-tenant workspaces, and billing foundations.", + whatYouGetTitle: "What you get", + whatYouGetBullets: [ + "Landing + pricing pages with clear upgrade paths", + "Protected app area and organization-aware APIs", + "Managed auth and billing integrations that stay migration-friendly" + ], + whatYouGetCtaPrefix: "See", + whatYouGetCtaPricing: "pricing", + whatYouGetCtaOr: "or continue to the", + whatYouGetCtaDashboard: "app dashboard", + statusTitle: "Platform Status", + backendUnreachable: "Backend 
is unreachable. Start API on port 8080.", + getStartedTitle: "Get started", + getStartedWithClerk: "Use sign up to create your account or sign in if you already have one.", + getStartedWithoutClerk: "Clerk is not configured yet. Add keys, then use /app as your protected dashboard." + }, + pricing: { + title: "Pricing", + subtitle: "Simple plans designed to get from zero to production without custom billing plumbing.", + helpTitle: "Need help choosing?", + helpBody: "Start with Pro and upgrade anytime. If you are already onboarded, head to the dashboard." + }, + common: { + language: "Language" + } +}; + +const ES: Messages = { + home: { + title: "Plantilla SaaS Core", + subtitle: "Lanza una base SaaS lista para producción con autenticación, multi-tenant y facturación.", + whatYouGetTitle: "Qué incluye", + whatYouGetBullets: [ + "Landing + precios con rutas claras de upgrade", + "Área protegida y APIs con contexto de organización", + "Integraciones gestionadas de auth y billing con migración fácil" + ], + whatYouGetCtaPrefix: "Ver", + whatYouGetCtaPricing: "precios", + whatYouGetCtaOr: "o continuar al", + whatYouGetCtaDashboard: "panel de la app", + statusTitle: "Estado de la plataforma", + backendUnreachable: "El backend no responde. Inicia el API en el puerto 8080.", + getStartedTitle: "Empezar", + getStartedWithClerk: "Usa registro para crear tu cuenta o iniciar sesión si ya tienes una.", + getStartedWithoutClerk: "Clerk no está configurado. Agrega las claves y usa /app como panel protegido." + }, + pricing: { + title: "Precios", + subtitle: "Planes simples para pasar de cero a producción sin construir facturación desde cero.", + helpTitle: "¿Necesitas ayuda para elegir?", + helpBody: "Empieza con Pro y actualiza cuando quieras. Si ya estás listo, ve al panel." + }, + common: { + language: "Idioma" + } +}; + +export function isLocale(value: string | undefined | null): value is Locale { + return (LOCALES as readonly string[]).includes(value ?? 
""); +} + +export function getMessages(locale: Locale): Messages { + return locale === "es" ? ES : EN; +} + diff --git a/frontend/lib/i18n/translate.ts b/frontend/lib/i18n/translate.ts new file mode 100644 index 0000000..2cc8fb4 --- /dev/null +++ b/frontend/lib/i18n/translate.ts @@ -0,0 +1,29 @@ +import type { Messages } from "./messages"; + +type DotPrefix = T extends "" ? "" : `.${T}`; +type DotNestedKeys = T extends object + ? { + [K in Extract]: T[K] extends string + ? `${K}` + : T[K] extends readonly any[] + ? `${K}` + : `${K}${DotPrefix>}`; + }[Extract] + : ""; + +export type MessageKey = DotNestedKeys; + +export function t(messages: Messages, key: MessageKey): string { + const parts = key.split("."); + let current: any = messages; + for (const part of parts) { + current = current?.[part]; + } + + if (typeof current === "string") { + return current; + } + + return key; +} + From 79ce652f6440117ea081256e88c23a14963881df Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 17:40:12 +0800 Subject: [PATCH 04/23] docs: add production checklist and reading order --- docs/README.md | 6 + .../git-branching-and-versioning.md | 2 +- docs/operations/production-setup-checklist.md | 137 ++++++++++++++++++ docs/overview.md | 75 ++++++++++ 4 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 docs/operations/production-setup-checklist.md create mode 100644 docs/overview.md diff --git a/docs/README.md b/docs/README.md index a9588a0..d20830e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,6 +2,10 @@ This directory contains implementation playbooks for contributors and AI agents. +## Start here + +- `overview.md` gives a reading order and repo map. + ## First 30 minutes 1. Run `./scripts/init-template.sh ""` if this is a new clone from template. @@ -26,6 +30,8 @@ This directory contains implementation playbooks for contributors and AI agents. 
- [SOC 2 Foundations](operations/compliance-soc2-foundations.md) - Baseline controls and evidence expectations. +- [Production Setup Checklist](operations/production-setup-checklist.md) + - End-to-end deployment wiring (Render + Vercel + providers). - [Observability (OpenTelemetry)](operations/observability.md) - Local tracing collector and production export configuration. - [Product Analytics (PostHog)](operations/product-analytics.md) diff --git a/docs/operations/git-branching-and-versioning.md b/docs/operations/git-branching-and-versioning.md index 419ba9e..01632db 100644 --- a/docs/operations/git-branching-and-versioning.md +++ b/docs/operations/git-branching-and-versioning.md @@ -35,7 +35,7 @@ Version bump guidance: ## CI expectations -- CI runs on pushes for `main`, `develop`, `dev`, and `feature/*`, and on PRs targeting `main`, `develop`, and `dev`. +- CI runs on pushes for `main`, `develop`, and `dev`, and on PRs targeting `main`, `develop`, and `dev`. - CI validates: - backend tests/build - frontend lint/typecheck/build diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md new file mode 100644 index 0000000..4715eef --- /dev/null +++ b/docs/operations/production-setup-checklist.md @@ -0,0 +1,137 @@ +# Production Setup Checklist + +This checklist wires the template end-to-end in production with: + +- Frontend on Vercel +- Backend + Postgres on Render +- Redis on Upstash +- Providers: Clerk (auth), Stripe (billing), PostHog (analytics), Sentry (error reporting), Crisp (support), Grafana Cloud (telemetry) + +Local development should continue to work with console/noop defaults and Docker Compose infra. + +## 0) Create provider accounts (once) + +- Clerk: create an application. +- Stripe: create an account and products/prices. +- Upstash: create a Redis database. +- PostHog: create a project. +- Sentry: create a project (frontend + backend can share or be separate). +- Crisp: create a website. 
+- Grafana Cloud: create a stack with OTLP endpoint + API token. + +## 1) Deploy backend + Postgres (Render) + +1. In Render, create services from `render.yaml`. +2. Confirm backend service is reachable (Render service URL): + - `GET /healthz` + - `GET /readyz` (will fail until DB + Redis configured) + - `GET /api/v1/meta` + +### Backend env vars (Render) + +Set these in the backend service: + +- Core + - `APP_ENV=production` + - `APP_VERSION=` + - `APP_BASE_URL=` (used for Stripe return URLs) + - `DATABASE_URL` (from Render Postgres) + - `REDIS_URL` (from Upstash) +- Auth (Clerk) + - `CLERK_SECRET_KEY` + - `CLERK_API_URL=https://api.clerk.com` +- Billing (Stripe) + - `STRIPE_SECRET_KEY` + - `STRIPE_WEBHOOK_SECRET` + - `STRIPE_PRICE_PRO_MONTHLY` + - `STRIPE_PRICE_TEAM_MONTHLY` +- Telemetry (Grafana Cloud via OTLP) + - `OTEL_TRACES_EXPORTER=otlp` + - `OTEL_SERVICE_NAME=saas-core-template-backend` (or your service name) + - `OTEL_EXPORTER_OTLP_ENDPOINT=` + - `OTEL_EXPORTER_OTLP_HEADERS=Authorization=Basic ` +- Error reporting (Sentry) + - `ERROR_REPORTING_PROVIDER=sentry` + - `SENTRY_DSN` + - `SENTRY_ENVIRONMENT=production` +- Analytics (PostHog) + - `ANALYTICS_PROVIDER=posthog` + - `POSTHOG_PROJECT_KEY` + - `POSTHOG_HOST=https://app.posthog.com` (or your host) + +### Database migration (Render Postgres) + +Apply `backend/migrations/0001_identity_tenancy_billing.up.sql` against the Render Postgres database before using auth/billing endpoints. + +## 2) Deploy frontend (Vercel) + +1. Import the repo in Vercel. +2. Set project root directory to `frontend/`. +3. Set environment variables. +4. Deploy. 
+ +### Frontend env vars (Vercel) + +- API + - `NEXT_PUBLIC_API_URL=` +- Auth (Clerk) + - `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY` +- Analytics (PostHog) + - `NEXT_PUBLIC_ANALYTICS_PROVIDER=posthog` + - `NEXT_PUBLIC_POSTHOG_KEY` + - `NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com` (or your host) +- Support (Crisp) + - `NEXT_PUBLIC_SUPPORT_PROVIDER=crisp` + - `NEXT_PUBLIC_CRISP_WEBSITE_ID` +- Error reporting (Sentry) + - `NEXT_PUBLIC_ERROR_REPORTING_PROVIDER=sentry` + - `NEXT_PUBLIC_SENTRY_DSN` + - `NEXT_PUBLIC_SENTRY_ENVIRONMENT=production` + - `NEXT_PUBLIC_APP_VERSION=` (optional) + +## 3) Provider dashboard configuration + +### Clerk + +- Add the Vercel frontend URL to allowed origins / redirect URLs. + +### Stripe + +- Create price IDs for `pro` and `team` and set them in backend env. +- Configure webhook endpoint: + - URL: `https:///api/v1/billing/webhook` + - Events: `checkout.session.completed`, `customer.subscription.created`, `customer.subscription.updated`, `customer.subscription.deleted` + +### PostHog + +- Ensure project keys match frontend/back env vars. + +### Sentry + +- Ensure DSNs match frontend/back env vars. + +### Crisp + +- Ensure website ID matches frontend env var. + +### Grafana Cloud + +- Confirm OTLP endpoint + Basic auth header. +- Validate traces arrive after a few API requests. + +## 4) Smoke test (production) + +1. Frontend loads and can reach backend: + - Landing page renders platform status. +2. Backend readiness: + - `GET /readyz` returns `{"status":"ready"}` +3. Auth: + - Sign in via Clerk, then open `/app` and confirm `GET /api/v1/auth/me` works. +4. Billing: + - Use `/pricing` and confirm checkout redirects, then confirm webhook updates subscription state. +5. Integrations: + - PostHog events appear (frontend and backend). + - Sentry captures test error (optional). + - Crisp widget loads (optional). + - Grafana Cloud receives traces (optional). 
+ diff --git a/docs/overview.md b/docs/overview.md new file mode 100644 index 0000000..863183e --- /dev/null +++ b/docs/overview.md @@ -0,0 +1,75 @@ +# Project Overview and Code Reading Order + +This doc is a “start here” guide for understanding the template quickly. + +## Reading order (recommended) + +1. Guardrails and workflow + - `AGENTS.md` + - `docs/operations/agent-workflow.md` +2. Architecture playbooks (what the template is optimizing for) + - `docs/architecture/auth-and-identity.md` + - `docs/architecture/multi-tenant-model.md` + - `docs/architecture/billing-and-pricing.md` +3. Local run + production wiring + - `README.md` + - `docs/operations/production-setup-checklist.md` +4. Integrations (optional, managed-first) + - `docs/operations/observability.md` + - `docs/operations/product-analytics.md` + - `docs/operations/error-reporting.md` + - `docs/operations/support.md` +5. Frontend UI conventions + - `docs/frontend/ui-design-guide.md` + +## Repository layout (what lives where) + +- `backend/` + - `cmd/api/main.go`: composition root (config, DB/Redis, providers, server start). + - `internal/api/`: HTTP transport (routes, middleware, JSON responses). + - `internal/auth/`: auth provider adapter + identity mapping + org resolution. + - `internal/billing/`: billing provider adapter + webhook handling + subscription state. + - `internal/analytics/`: backend analytics adapter boundary (console/PostHog/noop). + - `internal/telemetry/`: OpenTelemetry init and exporter selection. + - `internal/errorreporting/`: backend error reporting adapter (console/Sentry/noop). + - `migrations/`: SQL migrations (identity, tenancy, billing tables). +- `frontend/` + - `app/`: Next.js routes and UI shells. + - `lib/api.ts`: frontend API client to the Go backend. + - `lib/integrations/*`: frontend integrations (analytics/support/error reporting). + - `lib/i18n/*`: minimal i18n layer (cookie locale + message catalog). 
+- `docker-compose.yml`: local Postgres + Redis + local OTel collector. +- `render.yaml`: Render blueprint (backend + Postgres). + +## Key request flows + +### Frontend → Backend connectivity + +- Frontend calls `GET /api/v1/meta` and `GET /api/v1/auth/me` using `NEXT_PUBLIC_API_URL`. + +### Auth flow (Clerk) + +- Frontend gets a Clerk session token and calls backend with `Authorization: Bearer `. +- Backend verifies token with Clerk, then ensures: + - internal `users` row exists + - identity mapping exists in `auth_identities` + - user has at least one organization membership + +### Tenancy / org context + +- Frontend sends `X-Organization-ID` (when available). +- Backend resolves org membership and denies by default. + +### Billing flow (Stripe) + +- Frontend calls checkout/portal endpoints (org-scoped). +- Backend uses internal plan mapping (`plans`) and creates Stripe sessions. +- Stripe sends webhooks to `/api/v1/billing/webhook` which upserts internal subscription state. + +## Local-first defaults (important) + +- If provider keys are unset: + - auth and billing endpoints return “not configured” errors + - telemetry/analytics/error reporting default to console output or no-ops + - support widget is disabled by default + From 4af28e03775cad71901301ace5b12bbdacafdcf4 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 18:09:57 +0800 Subject: [PATCH 05/23] feat(ui): add shadcn-ui primitives and Tailwind --- README.md | 1 + docs/frontend/ui-design-guide.md | 18 +- docs/operations/agent-workflow.md | 1 + frontend/app/app/dashboard-client.tsx | 86 ++- frontend/app/globals.css | 73 +-- frontend/app/language-switcher.tsx | 43 +- frontend/app/layout.tsx | 27 +- frontend/app/page.tsx | 126 ++-- frontend/app/pricing/page.tsx | 33 +- frontend/app/pricing/pricing-client.tsx | 41 +- frontend/components.json | 19 + frontend/components/ui/button.tsx | 43 ++ frontend/components/ui/card.tsx | 36 ++ 
frontend/lib/i18n/locale.ts | 6 +- frontend/lib/i18n/translate.ts | 16 +- frontend/lib/utils.ts | 7 + frontend/package-lock.json | 809 +++++++++++++++++++++++- frontend/package.json | 8 + frontend/postcss.config.js | 8 + frontend/tailwind.config.ts | 57 ++ 20 files changed, 1244 insertions(+), 214 deletions(-) create mode 100644 frontend/components.json create mode 100644 frontend/components/ui/button.tsx create mode 100644 frontend/components/ui/card.tsx create mode 100644 frontend/lib/utils.ts create mode 100644 frontend/postcss.config.js create mode 100644 frontend/tailwind.config.ts diff --git a/README.md b/README.md index 16aa4a4..3c17764 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@ Production-shaped foundation for launching a startup SaaS baseline quickly. ## Stack - Frontend: Next.js (TypeScript) +- UI: shadcn/ui (Tailwind + Radix) - Backend: Go (`net/http`) - Database: Postgres - Cache: Redis (Upstash in cloud, local Redis in development) diff --git a/docs/frontend/ui-design-guide.md b/docs/frontend/ui-design-guide.md index 0c3bcc7..95afc7d 100644 --- a/docs/frontend/ui-design-guide.md +++ b/docs/frontend/ui-design-guide.md @@ -1,10 +1,10 @@ -# UI Design Guide (Tailwind + Radix) +# UI Design Guide (shadcn/ui + Tailwind + Radix) This template keeps UI styling intentionally simple, but the recommended direction for “production SaaS UI” is: +- shadcn/ui component patterns (Tailwind + Radix + variants). - Tailwind CSS for styling and design tokens. - Radix UI primitives for accessibility-correct components. -- A small component system using variants (for example `Button` with `intent`/`size`) to avoid one-off styles. ## Principles @@ -15,14 +15,23 @@ This template keeps UI styling intentionally simple, but the recommended directi ## Component conventions -- Put primitives in `frontend/components/ui/*`. -- Use a variant helper (e.g. `class-variance-authority`) to keep Tailwind class logic centralized. 
+- Put primitives in `frontend/components/ui/*` (shadcn-style). +- Use variants via `class-variance-authority` and utility composition via `frontend/lib/utils.ts`. - Always support: - `disabled` state - `loading` state (with `aria-busy`) - focus-visible ring - consistent spacing and typography +## Adding components (recommended workflow) + +This repo is set up to support shadcn’s generator config (`frontend/components.json`). To add new primitives: + +- From `frontend/`, run `npx shadcn@latest add ` +- Commit generated files under `frontend/components/ui/` + +Prefer adding a small set of primitives and using them everywhere instead of one-off custom markup. + ## Recommended primitives - `Button`, `Input`, `Textarea`, `Select`, `Badge` @@ -46,4 +55,3 @@ This template keeps UI styling intentionally simple, but the recommended directi - Do not embed strings in deeply nested components; pass copy in from the page/screen layer. - Avoid concatenating translated strings; prefer full sentences in message catalogs. - diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index 00edc50..6218528 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -40,6 +40,7 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - Confirm managed integrations are optional and local E2E still works with console/noop defaults (OpenTelemetry, analytics, error reporting, support widget). - Confirm no secrets were committed while adding integration variables. - Confirm i18n defaults render correctly (language switcher changes locale without breaking SSR pages). + - For UI work, prefer shadcn/ui primitives in `frontend/components/ui/` and avoid one-off styling. 
## 5) Documentation and traceability diff --git a/frontend/app/app/dashboard-client.tsx b/frontend/app/app/dashboard-client.tsx index f888850..10abb26 100644 --- a/frontend/app/app/dashboard-client.tsx +++ b/frontend/app/app/dashboard-client.tsx @@ -4,6 +4,8 @@ import { UserButton, useAuth } from "@clerk/nextjs"; import { useEffect, useMemo, useState } from "react"; import { createBillingPortalSession, fetchViewer, type ViewerResponse } from "@/lib/api"; import { createAnalyticsClient } from "@/lib/integrations/analytics"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; type LoadState = "idle" | "loading" | "error"; @@ -72,45 +74,59 @@ export function DashboardClient() { }; return ( -
-

App Dashboard

-

Protected workspace for authenticated organizations.

+
+
+

App Dashboard

+

Protected workspace for authenticated organizations.

+
{hasClerk && ( -
-
-

Session

-

{userId ? "Signed in with Clerk" : "Not signed in"}

-
- -
+ + +
+ Session +

{userId ? "Signed in with Clerk" : "Not signed in"}

+
+ +
+
)} -
-

Workspace

- {!hasClerk && ( -

- Clerk is not configured yet. Set NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY to enable auth and organization context. -

- )} - {hasClerk && state === "loading" &&

Loading your workspace context...

} - {hasClerk && state === "error" &&

Could not load workspace context from API. Ensure backend auth and migrations are configured.

} - {hasClerk && viewer && ( -
    -
  • User: {viewer.user.primaryEmail || viewer.user.id}
  • -
  • Organization: {viewer.organization.name || viewer.organization.id}
  • -
  • Role: {viewer.organization.role}
  • -
- )} -
+ + + Workspace + + + {!hasClerk && ( +

+ Clerk is not configured yet. Set NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY to enable auth and organization context. +

+ )} + {hasClerk && state === "loading" &&

Loading your workspace context...

} + {hasClerk && state === "error" &&

Could not load workspace context from API. Ensure backend auth and migrations are configured.

} + {hasClerk && viewer && ( +
    +
  • User: {viewer.user.primaryEmail || viewer.user.id}
  • +
  • Organization: {viewer.organization.name || viewer.organization.id}
  • +
  • Role: {viewer.organization.role}
  • +
+ )} +
+
-
-

Billing

-

Manage your plan and invoices in the Stripe customer portal.

- -
-
+ + + Billing + + +

Manage your plan and invoices in the Stripe customer portal.

+
+ +
+
+
+ ); } diff --git a/frontend/app/globals.css b/frontend/app/globals.css index c3f5134..3ad8e8f 100644 --- a/frontend/app/globals.css +++ b/frontend/app/globals.css @@ -1,47 +1,34 @@ -:root { - color-scheme: light dark; - font-family: Inter, system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", sans-serif; -} - -* { - box-sizing: border-box; -} +@tailwind base; +@tailwind components; +@tailwind utilities; -body { - margin: 0; - background: #0b1020; - color: #e7ecff; -} - -a { - color: inherit; - text-decoration: none; -} +@layer base { + :root { + --background: 222 47% 11%; + --foreground: 210 40% 98%; + --card: 222 47% 13%; + --card-foreground: 210 40% 98%; + --primary: 233 94% 69%; + --primary-foreground: 222 47% 11%; + --secondary: 217 33% 18%; + --secondary-foreground: 210 40% 98%; + --muted: 217 33% 18%; + --muted-foreground: 215 20% 70%; + --accent: 217 33% 18%; + --accent-foreground: 210 40% 98%; + --destructive: 0 84% 60%; + --destructive-foreground: 210 40% 98%; + --border: 217 33% 25%; + --input: 217 33% 25%; + --ring: 233 94% 69%; + --radius: 0.75rem; + } -main { - max-width: 960px; - margin: 0 auto; - padding: 2rem 1.25rem 4rem; -} - -.card { - background: rgba(255, 255, 255, 0.06); - border: 1px solid rgba(255, 255, 255, 0.16); - border-radius: 12px; - padding: 1rem; -} - -button { - border: 1px solid rgba(255, 255, 255, 0.24); - border-radius: 8px; - background: #6f7dff; - color: #ffffff; - padding: 0.55rem 0.85rem; - cursor: pointer; - font-weight: 600; -} + * { + @apply border-border; + } -button:disabled { - opacity: 0.6; - cursor: not-allowed; + body { + @apply bg-background text-foreground; + } } diff --git a/frontend/app/language-switcher.tsx b/frontend/app/language-switcher.tsx index 2fdfb12..1e31334 100644 --- a/frontend/app/language-switcher.tsx +++ b/frontend/app/language-switcher.tsx @@ -4,6 +4,7 @@ import { useRouter } from "next/navigation"; import { useMemo } from "react"; import { LOCALES, type Locale } from "@/lib/i18n/messages"; 
import { localeCookieName } from "@/lib/i18n/locale"; +import { Button } from "@/components/ui/button"; export function LanguageSwitcher({ currentLocale }: { currentLocale: Locale }) { const router = useRouter(); @@ -18,23 +19,29 @@ export function LanguageSwitcher({ currentLocale }: { currentLocale: Locale }) { ); return ( - +
+ Lang +
+ {options.map((opt) => { + const active = opt.value === currentLocale; + return ( + + ); + })} +
+
); } - diff --git a/frontend/app/layout.tsx b/frontend/app/layout.tsx index e53f05e..04e2f7e 100644 --- a/frontend/app/layout.tsx +++ b/frontend/app/layout.tsx @@ -4,27 +4,42 @@ import { AppClerkProvider } from "./clerk-provider"; import { AppIntegrationsProvider } from "./integrations-provider"; import { getServerLocale } from "@/lib/i18n/locale"; import { LanguageSwitcher } from "./language-switcher"; +import { Card } from "@/components/ui/card"; export const metadata: Metadata = { title: "SaaS Core Template", description: "Startup-ready SaaS template with auth, multi-tenant, and billing foundations." }; -export default function RootLayout({ +export default async function RootLayout({ children }: Readonly<{ children: React.ReactNode; }>) { - const locale = getServerLocale(); + const locale = await getServerLocale(); return ( - -
- + +
+
+
SaaS Core Template
+
+ +
+
+
+
+ +
+ This template uses shadcn/ui primitives. Replace this banner with your product nav. +
+
- {children} + +
{children}
+
diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx index dc930fe..8bc8062 100644 --- a/frontend/app/page.tsx +++ b/frontend/app/page.tsx @@ -3,57 +3,99 @@ import { fetchMeta } from "@/lib/api"; import { getServerLocale } from "@/lib/i18n/locale"; import { getMessages } from "@/lib/i18n/messages"; import { t } from "@/lib/i18n/translate"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; export default async function HomePage() { const meta = await fetchMeta(); const hasClerk = Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY); - const messages = getMessages(getServerLocale()); + const messages = getMessages(await getServerLocale()); return ( -
-

{t(messages, "home.title")}

-

{t(messages, "home.subtitle")}

- -
-

{t(messages, "home.whatYouGetTitle")}

-
    - {messages.home.whatYouGetBullets.map((bullet) => ( -
  • {bullet}
  • - ))} -
-

- {t(messages, "home.whatYouGetCtaPrefix")} {t(messages, "home.whatYouGetCtaPricing")}{" "} - {t(messages, "home.whatYouGetCtaOr")} {t(messages, "home.whatYouGetCtaDashboard")}. -

+
+
+

{t(messages, "home.title")}

+

{t(messages, "home.subtitle")}

+
+ + +
-
-

{t(messages, "home.statusTitle")}

- {meta ? ( -
    -
  • app: {meta.app}
  • -
  • env: {meta.env}
  • -
  • version: {meta.version}
  • -
  • time: {new Date(meta.time).toLocaleString()}
  • + + + {t(messages, "home.whatYouGetTitle")} + Template baseline features included out of the box. + + +
      + {messages.home.whatYouGetBullets.map((bullet) => ( +
    • {bullet}
    • + ))}
    - ) : ( -

    {t(messages, "home.backendUnreachable")}

    - )} -
+
+ + {t(messages, "home.whatYouGetCtaPrefix")} {t(messages, "home.whatYouGetCtaPricing")} + {" "} + {t(messages, "home.whatYouGetCtaOr")}{" "} + + {t(messages, "home.whatYouGetCtaDashboard")} + + . +
+ + -
-

{t(messages, "home.getStartedTitle")}

- {hasClerk ? ( -

- {t(messages, "home.getStartedWithClerk")}{" "} - sign up / sign in -

- ) : ( -

- {t(messages, "home.getStartedWithoutClerk")} /app -

- )} -
-
+ + + {t(messages, "home.statusTitle")} + Live backend connectivity check. + + + {meta ? ( +
    +
  • app: {meta.app}
  • +
  • env: {meta.env}
  • +
  • version: {meta.version}
  • +
  • time: {new Date(meta.time).toLocaleString()}
  • +
+ ) : ( +

{t(messages, "home.backendUnreachable")}

+ )} +
+
+ + + + {t(messages, "home.getStartedTitle")} + Auth and tenancy are optional until configured. + + + {hasClerk ? ( +

+ {t(messages, "home.getStartedWithClerk")}{" "} + + sign up + {" "} + /{" "} + + sign in + +

+ ) : ( +

+ {t(messages, "home.getStartedWithoutClerk")}{" "} + + /app + +

+ )} +
+
+
); } diff --git a/frontend/app/pricing/page.tsx b/frontend/app/pricing/page.tsx index b70c9cd..0b7b969 100644 --- a/frontend/app/pricing/page.tsx +++ b/frontend/app/pricing/page.tsx @@ -3,21 +3,30 @@ import { PricingClient } from "./pricing-client"; import { getServerLocale } from "@/lib/i18n/locale"; import { getMessages } from "@/lib/i18n/messages"; import { t } from "@/lib/i18n/translate"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; -export default function PricingPage() { - const messages = getMessages(getServerLocale()); +export default async function PricingPage() { + const messages = getMessages(await getServerLocale()); return ( -
-

{t(messages, "pricing.title")}

-

{t(messages, "pricing.subtitle")}

- -
-

{t(messages, "pricing.helpTitle")}

-

- {t(messages, "pricing.helpBody")} dashboard. -

+
+
+

{t(messages, "pricing.title")}

+

{t(messages, "pricing.subtitle")}

-
+ + + + {t(messages, "pricing.helpTitle")} + + + {t(messages, "pricing.helpBody")}{" "} + + dashboard + + . + + + ); } diff --git a/frontend/app/pricing/pricing-client.tsx b/frontend/app/pricing/pricing-client.tsx index 2bd483b..0fd0800 100644 --- a/frontend/app/pricing/pricing-client.tsx +++ b/frontend/app/pricing/pricing-client.tsx @@ -4,6 +4,8 @@ import { useAuth } from "@clerk/nextjs"; import { useState } from "react"; import { createCheckoutSession } from "@/lib/api"; import { createAnalyticsClient } from "@/lib/integrations/analytics"; +import { Button } from "@/components/ui/button"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; const PLANS = [ { @@ -55,28 +57,37 @@ export function PricingClient() { }; return ( -
+
{PLANS.map((plan) => ( -
-

{plan.name}

-

{plan.price}/mo

-

{plan.description}

-
    + + + {plan.name} +
    + {plan.price} + /mo +
    +

    {plan.description}

    +
    + +
      {plan.features.map((feature) => (
    • {feature}
    • ))} -
    - -
+ +
+ +
+ + ))} {!hasClerk && ( -
-

+ + Pricing checkout requires Clerk session auth. Configure NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY and backend keys to enable checkout. -

-
+ + )}
); diff --git a/frontend/components.json b/frontend/components.json new file mode 100644 index 0000000..410d186 --- /dev/null +++ b/frontend/components.json @@ -0,0 +1,19 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "new-york", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.ts", + "css": "app/globals.css", + "baseColor": "slate", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils", + "ui": "@/components/ui" + } +} + diff --git a/frontend/components/ui/button.tsx b/frontend/components/ui/button.tsx new file mode 100644 index 0000000..952777b --- /dev/null +++ b/frontend/components/ui/button.tsx @@ -0,0 +1,43 @@ +import * as React from "react"; +import { Slot } from "@radix-ui/react-slot"; +import { cva, type VariantProps } from "class-variance-authority"; + +import { cn } from "@/lib/utils"; + +const buttonVariants = cva( + "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 ring-offset-background", + { + variants: { + variant: { + default: "bg-primary text-primary-foreground hover:bg-primary/90", + secondary: "bg-secondary text-secondary-foreground hover:bg-secondary/80", + outline: "border border-input bg-background hover:bg-accent hover:text-accent-foreground", + ghost: "hover:bg-accent hover:text-accent-foreground", + link: "text-primary underline-offset-4 hover:underline" + }, + size: { + default: "h-10 px-4 py-2", + sm: "h-9 rounded-md px-3", + lg: "h-11 rounded-md px-8", + icon: "h-10 w-10" + } + }, + defaultVariants: { + variant: "default", + size: "default" + } + } +); + +export interface ButtonProps extends React.ButtonHTMLAttributes, VariantProps { + asChild?: boolean; +} + +export const Button = React.forwardRef( + ({ className, 
variant, size, asChild = false, ...props }, ref) => { + const Comp = asChild ? Slot : "button"; + return ; + } +); +Button.displayName = "Button"; + diff --git a/frontend/components/ui/card.tsx b/frontend/components/ui/card.tsx new file mode 100644 index 0000000..5cba835 --- /dev/null +++ b/frontend/components/ui/card.tsx @@ -0,0 +1,36 @@ +import * as React from "react"; + +import { cn } from "@/lib/utils"; + +export const Card = React.forwardRef>(({ className, ...props }, ref) => ( +
+)); +Card.displayName = "Card"; + +export const CardHeader = React.forwardRef>(({ className, ...props }, ref) => ( +
+)); +CardHeader.displayName = "CardHeader"; + +export const CardTitle = React.forwardRef>(({ className, ...props }, ref) => ( +

+)); +CardTitle.displayName = "CardTitle"; + +export const CardDescription = React.forwardRef>( + ({ className, ...props }, ref) => ( +

+ ) +); +CardDescription.displayName = "CardDescription"; + +export const CardContent = React.forwardRef>(({ className, ...props }, ref) => ( +

+)); +CardContent.displayName = "CardContent"; + +export const CardFooter = React.forwardRef>(({ className, ...props }, ref) => ( +
+)); +CardFooter.displayName = "CardFooter"; + diff --git a/frontend/lib/i18n/locale.ts b/frontend/lib/i18n/locale.ts index 37e2e3a..43310d0 100644 --- a/frontend/lib/i18n/locale.ts +++ b/frontend/lib/i18n/locale.ts @@ -3,8 +3,9 @@ import { isLocale, type Locale } from "./messages"; const COOKIE_NAME = "locale"; -export function getServerLocale(): Locale { - const value = cookies().get(COOKIE_NAME)?.value; +export async function getServerLocale(): Promise { + const store = await cookies(); + const value = store.get(COOKIE_NAME)?.value; if (isLocale(value)) { return value; } @@ -12,4 +13,3 @@ export function getServerLocale(): Locale { } export const localeCookieName = COOKIE_NAME; - diff --git a/frontend/lib/i18n/translate.ts b/frontend/lib/i18n/translate.ts index 2cc8fb4..07ff101 100644 --- a/frontend/lib/i18n/translate.ts +++ b/frontend/lib/i18n/translate.ts @@ -1,19 +1,6 @@ import type { Messages } from "./messages"; -type DotPrefix = T extends "" ? "" : `.${T}`; -type DotNestedKeys = T extends object - ? { - [K in Extract]: T[K] extends string - ? `${K}` - : T[K] extends readonly any[] - ? 
`${K}` - : `${K}${DotPrefix>}`; - }[Extract] - : ""; - -export type MessageKey = DotNestedKeys; - -export function t(messages: Messages, key: MessageKey): string { +export function t(messages: Messages, key: string): string { const parts = key.split("."); let current: any = messages; for (const part of parts) { @@ -26,4 +13,3 @@ export function t(messages: Messages, key: MessageKey): string { return key; } - diff --git a/frontend/lib/utils.ts b/frontend/lib/utils.ts new file mode 100644 index 0000000..6fcf374 --- /dev/null +++ b/frontend/lib/utils.ts @@ -0,0 +1,7 @@ +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} + diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 682fe7e..438d570 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -9,19 +9,39 @@ "version": "0.1.0", "dependencies": { "@clerk/nextjs": "^6.37.5", + "@radix-ui/react-slot": "^1.1.0", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", "next": "^15.1.0", "react": "^19.0.0", - "react-dom": "^19.0.0" + "react-dom": "^19.0.0", + "tailwind-merge": "^2.5.4" }, "devDependencies": { "@types/node": "^22.10.1", "@types/react": "^19.0.2", "@types/react-dom": "^19.0.2", + "autoprefixer": "^10.4.20", "eslint": "^9.16.0", "eslint-config-next": "^15.1.0", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.16", + "tailwindcss-animate": "^1.0.7", "typescript": "^5.7.2" } }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@clerk/backend": { "version": "2.31.1", "resolved": 
"https://registry.npmjs.org/@clerk/backend/-/backend-2.31.1.tgz", @@ -819,6 +839,41 @@ "url": "https://opencollective.com/libvips" } }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, "node_modules/@napi-rs/wasm-runtime": { "version": "0.2.12", "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-0.2.12.tgz", @@ -1024,6 +1079,37 @@ "node": ">=12.4.0" } }, + "node_modules/@radix-ui/react-compose-refs": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz", + "integrity": 
"sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==", + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@radix-ui/react-slot": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz", + "integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==", + "dependencies": { + "@radix-ui/react-compose-refs": "1.1.2" + }, + "peerDependencies": { + "@types/react": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/@rtsao/scc": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@rtsao/scc/-/scc-1.1.0.tgz", @@ -1099,9 +1185,8 @@ "version": "19.2.14", "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", - "dev": true, + "devOptional": true, "license": "MIT", - "peer": true, "dependencies": { "csstype": "^3.2.2" } @@ -1161,7 +1246,6 @@ "integrity": "sha512-IgSWvLobTDOjnaxAfDTIHaECbkNlAlKv2j5SjpB2v7QHKv1FIfjwMy8FsDbVfDX/KjmCmYICcw7uGaXLhtsLNg==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@typescript-eslint/scope-manager": "8.56.0", "@typescript-eslint/types": "8.56.0", @@ -1661,7 +1745,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -1712,6 +1795,31 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + 
"integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true + }, "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", @@ -1906,6 +2014,42 @@ "node": ">= 0.4" } }, + "node_modules/autoprefixer": { + "version": "10.4.24", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.24.tgz", + "integrity": "sha512-uHZg7N9ULTVbutaIsDRoUkoS8/h3bdsmVJYZ5l3wv8Cp/6UIIoRDm90hZ+BwxUj/hGBEzLxdHNSKuFpn8WOyZw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001766", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, "node_modules/available-typed-arrays": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", @@ -1949,6 +2093,30 @@ "dev": true, "license": 
"MIT" }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz", + "integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==", + "dev": true, + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/brace-expansion": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", @@ -1973,6 +2141,39 @@ "node": ">=8" } }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ 
-2033,6 +2234,15 @@ "node": ">=6" } }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/caniuse-lite": { "version": "1.0.30001770", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001770.tgz", @@ -2070,12 +2280,67 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, "node_modules/client-only": { "version": 
"0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==", "license": "MIT" }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "engines": { + "node": ">=6" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -2096,6 +2361,15 @@ "dev": true, "license": "MIT" }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -2118,11 +2392,23 @@ "node": ">= 8" } }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/csstype": { "version": "3.2.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", - "dev": true, + "devOptional": true, "license": "MIT" }, "node_modules/damerau-levenshtein": { @@ -2266,6 +2552,18 @@ "node": ">=8" } }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": 
"sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true + }, "node_modules/doctrine": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", @@ -2294,6 +2592,12 @@ "node": ">= 0.4" } }, + "node_modules/electron-to-chromium": { + "version": "1.5.302", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.302.tgz", + "integrity": "sha512-sM6HAN2LyK82IyPBpznDRqlTQAtuSaO+ShzFiWTvoMJLHyZ+Y39r8VMfHzwbU8MVBzQ4Wdn85+wlZl2TLGIlwg==", + "dev": true + }, "node_modules/emoji-regex": { "version": "9.2.2", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", @@ -2478,6 +2782,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -2497,7 +2810,6 @@ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -2671,7 +2983,6 @@ "integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@rtsao/scc": "^1.1.0", "array-includes": 
"^3.1.9", @@ -3071,6 +3382,33 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, "node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -3454,6 +3792,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/is-boolean-object": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.2.tgz", @@ -3834,6 +4184,15 @@ "node": ">= 0.4" } }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "bin": { + "jiti": "bin/jiti.js" + } + }, "node_modules/js-cookie": { "version": "3.0.5", "resolved": 
"https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", @@ -3957,6 +4316,24 @@ "node": ">= 0.8.0" } }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -4057,6 +4434,17 @@ "dev": true, "license": "MIT" }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, "node_modules/nanoid": { "version": "3.3.11", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", @@ -4103,7 +4491,6 @@ "resolved": "https://registry.npmjs.org/next/-/next-15.5.12.tgz", "integrity": "sha512-Fi/wQ4Etlrn60rz78bebG1i1SR20QxvV8tVp6iJspjLUSHcZoeUXCt+vmWoEcza85ElZzExK/jJ/F6SvtGktjA==", "license": "MIT", - "peer": true, "dependencies": { "@next/env": "15.5.12", "@swc/helpers": "0.5.15", @@ -4151,6 +4538,33 @@ } } }, + "node_modules/next/node_modules/postcss": { + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + 
"funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.6", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, "node_modules/node-exports-info": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/node-exports-info/-/node-exports-info-1.6.0.tgz", @@ -4180,6 +4594,21 @@ "semver": "bin/semver.js" } }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/object-assign": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", @@ -4190,6 +4619,15 @@ "node": ">=0.10.0" } }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/object-inspect": { "version": "1.13.4", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", @@ -4430,6 +4868,24 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, "node_modules/possible-typed-array-names": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", @@ -4441,9 +4897,10 @@ } }, "node_modules/postcss": { - "version": "8.4.31", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", - "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "dev": true, "funding": [ { "type": "opencollective", @@ -4458,16 +4915,143 @@ "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "dependencies": { - "nanoid": "^3.3.6", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" }, "engines": { "node": "^10 || ^12 || >=14" } }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" 
+ } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + 
"peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true + }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -4526,7 +5110,6 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz", "integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==", "license": "MIT", - "peer": true, "engines": { "node": ">=0.10.0" } @@ -4536,7 +5119,6 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz", "integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==", "license": "MIT", - "peer": true, "dependencies": { "scheduler": "^0.27.0" }, @@ -4551,6 +5133,27 @@ "dev": true, "license": "MIT" }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": 
"sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -5149,6 +5752,28 @@ } } }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -5188,6 +5813,110 @@ "react": "^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/tailwind-merge": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-2.6.1.tgz", + "integrity": "sha512-Oo6tHdpZsGpkKG88HJ8RR1rg/RdnEkQEfMoEk2x1XRI3F1AxeU+ijRXpiVUF4UbLfcxxRGw6TbUINKYdWVsQTQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + 
"fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tailwindcss-animate": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz", + "integrity": "sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==", + "dev": true, + "peerDependencies": { + "tailwindcss": ">=3.0.0 || insiders" + } + }, + "node_modules/tailwindcss/node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/tailwindcss/node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": 
"sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -5229,7 +5958,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -5263,6 +5991,12 @@ "typescript": ">=4.8.4" } }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true + }, "node_modules/tsconfig-paths": { "version": "3.15.0", "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", @@ -5379,7 +6113,6 @@ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", "dev": true, "license": "Apache-2.0", - "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -5449,6 +6182,36 @@ "@unrs/resolver-binding-win32-x64-msvc": "1.11.1" } }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": 
"opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -5468,6 +6231,12 @@ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", diff --git a/frontend/package.json b/frontend/package.json index 006db0f..01c6de0 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -11,6 +11,10 @@ }, "dependencies": { "@clerk/nextjs": "^6.37.5", + "@radix-ui/react-slot": "^1.1.0", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "tailwind-merge": "^2.5.4", "next": "^15.1.0", "react": "^19.0.0", "react-dom": "^19.0.0" @@ -19,8 +23,12 @@ "@types/node": "^22.10.1", "@types/react": "^19.0.2", "@types/react-dom": "^19.0.2", + "autoprefixer": "^10.4.20", "eslint": "^9.16.0", "eslint-config-next": "^15.1.0", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.16", + "tailwindcss-animate": "^1.0.7", "typescript": "^5.7.2" } } diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js new file mode 100644 index 0000000..264ab4f --- /dev/null +++ b/frontend/postcss.config.js @@ -0,0 +1,8 @@ +/** @type {import('postcss-load-config').Config} */ +module.exports = { + plugins: { + tailwindcss: {}, + 
autoprefixer: {} + } +}; + diff --git a/frontend/tailwind.config.ts b/frontend/tailwind.config.ts new file mode 100644 index 0000000..afd1352 --- /dev/null +++ b/frontend/tailwind.config.ts @@ -0,0 +1,57 @@ +import type { Config } from "tailwindcss"; + +const config: Config = { + darkMode: ["class"], + content: ["./app/**/*.{ts,tsx}", "./components/**/*.{ts,tsx}", "./lib/**/*.{ts,tsx}"], + theme: { + container: { + center: true, + padding: "2rem", + screens: { + "2xl": "1400px" + } + }, + extend: { + colors: { + border: "hsl(var(--border))", + input: "hsl(var(--input))", + ring: "hsl(var(--ring))", + background: "hsl(var(--background))", + foreground: "hsl(var(--foreground))", + primary: { + DEFAULT: "hsl(var(--primary))", + foreground: "hsl(var(--primary-foreground))" + }, + secondary: { + DEFAULT: "hsl(var(--secondary))", + foreground: "hsl(var(--secondary-foreground))" + }, + muted: { + DEFAULT: "hsl(var(--muted))", + foreground: "hsl(var(--muted-foreground))" + }, + accent: { + DEFAULT: "hsl(var(--accent))", + foreground: "hsl(var(--accent-foreground))" + }, + destructive: { + DEFAULT: "hsl(var(--destructive))", + foreground: "hsl(var(--destructive-foreground))" + }, + card: { + DEFAULT: "hsl(var(--card))", + foreground: "hsl(var(--card-foreground))" + } + }, + borderRadius: { + lg: "var(--radius)", + md: "calc(var(--radius) - 2px)", + sm: "calc(var(--radius) - 4px)" + } + } + }, + plugins: [require("tailwindcss-animate")] +}; + +export default config; + From 750bc16f9f89d9df37251c2415aaaec8baee90e8 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 19:08:04 +0800 Subject: [PATCH 06/23] feat: add jobs, email, uploads, and audit logs --- .env.example | 8 + .gitignore | 1 + Makefile | 3 + README.md | 18 +- backend/.env.example | 24 ++ backend/cmd/api/main.go | 39 ++- backend/cmd/worker/main.go | 157 ++++++++++ backend/go.mod | 19 ++ backend/go.sum | 40 +++ backend/internal/api/router.go | 280 
++++++++++++++++++ backend/internal/audit/audit.go | 33 +++ backend/internal/audit/db.go | 100 +++++++ backend/internal/auth/auth.go | 74 ++++- backend/internal/config/config.go | 60 ++++ backend/internal/email/email.go | 51 ++++ backend/internal/email/resend.go | 66 +++++ backend/internal/files/files.go | 257 ++++++++++++++++ backend/internal/files/s3.go | 108 +++++++ backend/internal/jobs/jobs.go | 192 ++++++++++++ .../migrations/0002_jobs_audit_files.down.sql | 11 + .../migrations/0002_jobs_audit_files.up.sql | 46 +++ docs/README.md | 8 + docs/operations/agent-workflow.md | 1 + docs/operations/audit-logs.md | 20 ++ docs/operations/background-jobs.md | 24 ++ docs/operations/email.md | 25 ++ docs/operations/file-uploads.md | 30 ++ docs/operations/production-setup-checklist.md | 39 ++- docs/overview.md | 6 +- frontend/app/app/dashboard-client.tsx | 172 ++++++++++- frontend/lib/api.ts | 112 +++++++ render.yaml | 82 +++++ 32 files changed, 2083 insertions(+), 23 deletions(-) create mode 100644 backend/cmd/worker/main.go create mode 100644 backend/internal/audit/audit.go create mode 100644 backend/internal/audit/db.go create mode 100644 backend/internal/email/email.go create mode 100644 backend/internal/email/resend.go create mode 100644 backend/internal/files/files.go create mode 100644 backend/internal/files/s3.go create mode 100644 backend/internal/jobs/jobs.go create mode 100644 backend/migrations/0002_jobs_audit_files.down.sql create mode 100644 backend/migrations/0002_jobs_audit_files.up.sql create mode 100644 docs/operations/audit-logs.md create mode 100644 docs/operations/background-jobs.md create mode 100644 docs/operations/email.md create mode 100644 docs/operations/file-uploads.md diff --git a/.env.example b/.env.example index 0e69ef6..5f2ee29 100644 --- a/.env.example +++ b/.env.example @@ -14,6 +14,14 @@ SENTRY_ENVIRONMENT=development ANALYTICS_PROVIDER=console POSTHOG_PROJECT_KEY= POSTHOG_HOST=https://app.posthog.com +EMAIL_PROVIDER=console +EMAIL_FROM= 
+RESEND_API_KEY= +JOBS_ENABLED=true +JOBS_WORKER_ID=local +JOBS_POLL_INTERVAL=1s +FILE_STORAGE_PROVIDER=disk +FILE_STORAGE_DISK_PATH=./backend/.data/uploads # Frontend NEXT_PUBLIC_API_URL=http://localhost:8080 diff --git a/.gitignore b/.gitignore index 1f6f4ec..5558810 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ frontend/node_modules/ # Backend backend/bin/ backend/api +backend/.data/ # Local infra pgdata/ diff --git a/Makefile b/Makefile index e4c8eb2..6ada069 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,9 @@ infra-down: dev-api: cd backend && go run ./cmd/api +dev-worker: + cd backend && go run ./cmd/worker + dev-ui: cd frontend && npm run dev diff --git a/README.md b/README.md index 3c17764..b479f99 100644 --- a/README.md +++ b/README.md @@ -54,6 +54,15 @@ Core variables: - `ANALYTICS_PROVIDER` (`console`, `posthog`, or `none`) - `POSTHOG_PROJECT_KEY` - `POSTHOG_HOST` + - `EMAIL_PROVIDER` (`console`, `resend`, or `none`) + - `EMAIL_FROM` + - `RESEND_API_KEY` + - `JOBS_ENABLED` (worker toggle) + - `JOBS_WORKER_ID` + - `JOBS_POLL_INTERVAL` + - `FILE_STORAGE_PROVIDER` (`disk`, `s3`, or `none`) + - `FILE_STORAGE_DISK_PATH` + - `S3_BUCKET`, `S3_REGION`, `S3_ENDPOINT`, `S3_ACCESS_KEY_ID`, `S3_SECRET_ACCESS_KEY`, `S3_FORCE_PATH_STYLE` - `CLERK_SECRET_KEY` - `CLERK_API_URL` (default `https://api.clerk.com`) - `STRIPE_SECRET_KEY` @@ -79,9 +88,10 @@ Core variables: SQL migrations live in `backend/migrations/`. Apply them with your preferred migration tool before using auth/billing endpoints. 
-Initial migration file: +Initial migration files: - `backend/migrations/0001_identity_tenancy_billing.up.sql` +- `backend/migrations/0002_jobs_audit_files.up.sql` ## Local development Run infra first: @@ -98,6 +108,12 @@ Start backend in one terminal: make dev-api ``` +Start worker in another terminal (jobs + email): + +```bash +make dev-worker +``` + Start frontend in another terminal: ```bash diff --git a/backend/.env.example b/backend/.env.example index 5581101..435b4a1 100644 --- a/backend/.env.example +++ b/backend/.env.example @@ -28,6 +28,30 @@ SENTRY_ENVIRONMENT=development ANALYTICS_PROVIDER=console POSTHOG_PROJECT_KEY= POSTHOG_HOST=https://app.posthog.com + +# Email (Resend) +# - Local default: logs emails to console. +# - To enable Resend: set EMAIL_PROVIDER=resend, RESEND_API_KEY, and EMAIL_FROM. +EMAIL_PROVIDER=console +EMAIL_FROM= +RESEND_API_KEY= + +# Background jobs +JOBS_ENABLED=true +JOBS_WORKER_ID=local +JOBS_POLL_INTERVAL=1s + +# File uploads +# - Local default: store files on disk under FILE_STORAGE_DISK_PATH. +# - For S3/R2: set FILE_STORAGE_PROVIDER=s3 and configure S3_* variables. 
+FILE_STORAGE_PROVIDER=disk +FILE_STORAGE_DISK_PATH=./.data/uploads +S3_BUCKET= +S3_REGION=auto +S3_ENDPOINT= +S3_ACCESS_KEY_ID= +S3_SECRET_ACCESS_KEY= +S3_FORCE_PATH_STYLE=true CLERK_SECRET_KEY= CLERK_API_URL=https://api.clerk.com STRIPE_SECRET_KEY= diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index d8a0667..f85e9e0 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -14,12 +14,15 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "saas-core-template/backend/internal/analytics" "saas-core-template/backend/internal/api" + "saas-core-template/backend/internal/audit" "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" "saas-core-template/backend/internal/cache" "saas-core-template/backend/internal/config" "saas-core-template/backend/internal/db" "saas-core-template/backend/internal/errorreporting" + "saas-core-template/backend/internal/files" + "saas-core-template/backend/internal/jobs" "saas-core-template/backend/internal/telemetry" ) @@ -96,10 +99,42 @@ func main() { _ = redisClient.Close() }() + auditRecorder := audit.NewDBRecorder(pool) + jobStore := jobs.NewStore(pool) + + var s3Provider *files.S3Provider + if cfg.FileStorageProvider == "s3" { + p, err := files.NewS3Provider(ctx, files.S3Config{ + Bucket: cfg.S3Bucket, + Region: cfg.S3Region, + Endpoint: cfg.S3Endpoint, + AccessKeyID: cfg.S3AccessKeyID, + SecretAccessKey: cfg.S3SecretAccessKey, + ForcePathStyle: cfg.S3ForcePathStyle, + }) + if err != nil { + slog.Error("failed to initialize s3 provider", "error", err) + os.Exit(1) + } + s3Provider = p + } + + var filesService *files.Service + switch cfg.FileStorageProvider { + case "none", "noop", "off", "disabled": + filesService = nil + default: + filesService = files.NewService(pool, files.Config{ + Provider: cfg.FileStorageProvider, + DiskPath: cfg.FileStorageDiskPath, + S3: s3Provider, + }) + } + var authService *auth.Service if cfg.ClerkSecretKey != "" { 
authProvider := auth.NewClerkProvider(cfg.ClerkSecretKey, cfg.ClerkAPIURL) - authService = auth.NewService(authProvider, pool) + authService = auth.NewService(authProvider, pool, auth.WithJobs(jobStore), auth.WithAudit(auditRecorder)) } var billingService *billing.Service @@ -124,6 +159,8 @@ func main() { api.WithBillingService(billingService), api.WithAppBaseURL(cfg.AppBaseURL), api.WithAnalytics(analyticsClient), + api.WithAudit(auditRecorder), + api.WithFiles(filesService), ) baseHandler := apiServer.Handler() diff --git a/backend/cmd/worker/main.go b/backend/cmd/worker/main.go new file mode 100644 index 0000000..d33879e --- /dev/null +++ b/backend/cmd/worker/main.go @@ -0,0 +1,157 @@ +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log/slog" + "os" + "os/signal" + "syscall" + "time" + + "saas-core-template/backend/internal/config" + "saas-core-template/backend/internal/db" + "saas-core-template/backend/internal/email" + "saas-core-template/backend/internal/errorreporting" + "saas-core-template/backend/internal/jobs" + "saas-core-template/backend/internal/telemetry" +) + +const workerName = "saas-core-template-worker" + +func main() { + cfg, err := config.Load() + if err != nil { + slog.Error("failed to load config", "error", err) + os.Exit(1) + } + + if !cfg.JobsEnabled { + slog.Info("jobs disabled; exiting") + return + } + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + shutdownTelemetry, err := telemetry.Init(ctx, telemetry.Config{ + ServiceName: defaultString(cfg.ServiceName, workerName), + Environment: cfg.Env, + Version: cfg.Version, + TracesExporter: cfg.OtelTracesExporter, + OTLPEndpoint: cfg.OtelOTLPEndpoint, + OTLPHeaders: telemetry.ParseOTLPHeaders(cfg.OtelOTLPHeadersRaw), + }) + if err != nil { + slog.Error("failed to initialize telemetry", "error", err) + os.Exit(1) + } + defer func() { _ = shutdownTelemetry(context.Background()) }() + + reporter, err := 
errorreporting.New(ctx, errorreporting.Config{ + Provider: cfg.ErrorReportingProvider, + DSN: cfg.SentryDSN, + Environment: cfg.SentryEnvironment, + Release: cfg.Version, + }) + if err != nil { + slog.Error("failed to initialize error reporting", "error", err) + os.Exit(1) + } + defer func() { _ = reporter.Shutdown(context.Background()) }() + + pool, err := db.Connect(ctx, cfg.DatabaseURL) + if err != nil { + slog.Error("failed to connect to postgres", "error", err) + os.Exit(1) + } + defer pool.Close() + + sender := buildEmailSender(cfg) + claimer := jobs.NewClaimer(pool, jobs.ClaimerConfig{ + WorkerID: cfg.JobsWorkerID, + LockTTL: 5 * time.Minute, + }) + + slog.Info("worker started", "name", workerName, "worker_id", cfg.JobsWorkerID, "poll", cfg.JobsPollInterval.String()) + + ticker := time.NewTicker(cfg.JobsPollInterval) + defer ticker.Stop() + + from := defaultString(cfg.EmailFrom, "local@example.com") + + for { + select { + case <-ctx.Done(): + slog.Info("worker shutting down") + return + case <-ticker.C: + if err := runOnce(ctx, claimer, sender, from); err != nil && !errors.Is(err, context.Canceled) { + errorreporting.Capture(ctx, reporter, err, map[string]string{"component": "worker"}) + } + } + } +} + +func runOnce(ctx context.Context, claimer *jobs.Claimer, sender email.Sender, from string) error { + job, err := claimer.ClaimNext(ctx) + if err != nil { + return err + } + if job == nil { + return nil + } + + switch job.Type { + case "send_email": + var payload struct { + To string `json:"to"` + Subject string `json:"subject"` + Text string `json:"text"` + HTML string `json:"html"` + } + if err := json.Unmarshal(job.PayloadJSON, &payload); err != nil { + _ = claimer.Fail(ctx, jobs.FailureInput{JobID: job.ID, Attempts: job.Attempts, MaxAttempts: job.MaxAttempts, Err: fmt.Errorf("decode payload: %w", err)}) + return nil + } + + msg := email.Message{ + To: payload.To, + From: from, + Subject: payload.Subject, + Text: payload.Text, + HTML: payload.HTML, + } + + 
if err := sender.Send(ctx, msg); err != nil { + _ = claimer.Fail(ctx, jobs.FailureInput{JobID: job.ID, Attempts: job.Attempts, MaxAttempts: job.MaxAttempts, Err: err}) + return nil + } + return claimer.Complete(ctx, job.ID) + default: + _ = claimer.Fail(ctx, jobs.FailureInput{JobID: job.ID, Attempts: job.Attempts, MaxAttempts: job.MaxAttempts, Err: fmt.Errorf("unknown job type %q", job.Type)}) + return nil + } +} + +func buildEmailSender(cfg config.Config) email.Sender { + switch cfg.EmailProvider { + case "", "console": + return email.NewConsole() + case "none", "noop", "off", "disabled": + return email.NewNoop() + case "resend": + return email.NewResend(cfg.ResendAPIKey) + default: + return email.NewConsole() + } +} + +func defaultString(value string, fallback string) string { + if value == "" { + return fallback + } + return value +} diff --git a/backend/go.mod b/backend/go.mod index cd5105d..3772a9e 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -8,6 +8,24 @@ require ( ) require ( + github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.27 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.12 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.57.0 // indirect + 
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect @@ -19,6 +37,7 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect go.opentelemetry.io/otel v1.26.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index a097a85..72cf5bb 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,3 +1,39 @@ +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod 
h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12 h1:DXFWyt7ymx/l1ygdyTTS0X923e+Q2wXIxConJzrgwc0= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.12/go.mod h1:mVOr/LbvaNySK1/BTy4cBOCjhCNY2raWBwK4v+WR5J4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14 h1:oWccitSnByVU74rQRHac4gLfDqjB6Z1YQGOY/dXKedI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.14/go.mod h1:8SaZBlQdCLrc/2U3CEO48rYj9uR8qRsPRkmzwNM52pM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.12 h1:tzha+v1SCEBpXWEuw6B/+jm4h5z8hZbTpXz0zRZqTnw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.12/go.mod h1:n+nt2qjHGoseWeLHt1vEr6ZRCCxIN2KcNpJxBcYQSwI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.57.0 h1:v2DWNY6ll3JK62Bx1khUu9fJ4f3TwXllIEJxI7dDv/o= +github.com/aws/aws-sdk-go-v2/service/s3 
v1.57.0/go.mod h1:8rDw3mVwmvIWWX/+LWY3PPIMZuwnQdJMCt0iVFVT3qw= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= @@ -30,6 +66,9 @@ github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/redis/go-redis/v9 v9.7.0 
h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= @@ -77,6 +116,7 @@ google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDom google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/backend/internal/api/router.go b/backend/internal/api/router.go index e7cec28..c9ac21b 100644 --- a/backend/internal/api/router.go +++ b/backend/internal/api/router.go @@ -12,8 +12,10 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/redis/go-redis/v9" "saas-core-template/backend/internal/analytics" + "saas-core-template/backend/internal/audit" "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" + "saas-core-template/backend/internal/files" ) type Server struct { @@ -26,6 +28,8 @@ type Server struct { auth *auth.Service billing *billing.Service analytics analytics.Client + audit audit.Recorder + files *files.Service } type serverOptions struct { @@ -33,6 +37,8 @@ type serverOptions struct { billingService *billing.Service appBaseURL string analytics analytics.Client + audit audit.Recorder + files *files.Service } func NewServer(appName string, env string, version string, db *pgxpool.Pool, redisClient *redis.Client, opts ...func(*serverOptions)) *Server { @@ -51,6 +57,8 @@ func NewServer(appName string, env string, version string, db *pgxpool.Pool, red auth: options.authService, billing: options.billingService, analytics: 
defaultAnalytics(options.analytics), + audit: defaultAudit(options.audit), + files: options.files, } } @@ -61,6 +69,13 @@ func defaultAnalytics(client analytics.Client) analytics.Client { return client } +func defaultAudit(recorder audit.Recorder) audit.Recorder { + if recorder == nil { + return audit.NewNoop() + } + return recorder +} + func WithAuthService(authService *auth.Service) func(*serverOptions) { return func(opts *serverOptions) { opts.authService = authService @@ -85,6 +100,18 @@ func WithAnalytics(client analytics.Client) func(*serverOptions) { } } +func WithAudit(recorder audit.Recorder) func(*serverOptions) { + return func(opts *serverOptions) { + opts.audit = recorder + } +} + +func WithFiles(service *files.Service) func(*serverOptions) { + return func(opts *serverOptions) { + opts.files = service + } +} + func (s *Server) Handler() http.Handler { mux := http.NewServeMux() mux.HandleFunc("GET /healthz", s.healthz) @@ -94,6 +121,12 @@ func (s *Server) Handler() http.Handler { mux.HandleFunc("POST /api/v1/billing/checkout-session", s.requireOrg(s.billingCheckoutSession)) mux.HandleFunc("POST /api/v1/billing/portal-session", s.requireOrg(s.billingPortalSession)) mux.HandleFunc("POST /api/v1/billing/webhook", s.billingWebhook) + mux.HandleFunc("GET /api/v1/audit/events", s.requireOrg(s.auditEvents)) + mux.HandleFunc("POST /api/v1/files/upload-url", s.requireOrg(s.filesUploadURL)) + mux.HandleFunc("POST /api/v1/files/{id}/upload", s.requireOrg(s.filesDirectUpload)) + mux.HandleFunc("POST /api/v1/files/{id}/complete", s.requireOrg(s.filesComplete)) + mux.HandleFunc("GET /api/v1/files/{id}/download-url", s.requireOrg(s.filesDownloadURL)) + mux.HandleFunc("GET /api/v1/files/{id}/download", s.requireOrg(s.filesDownload)) return withCommonMiddleware(mux) } @@ -274,6 +307,12 @@ func (s *Server) billingCheckoutSession(w http.ResponseWriter, r *http.Request) "plan_code": req.PlanCode, }, }) + _ = s.audit.Record(r.Context(), audit.Event{ + OrganizationID: org.ID, 
+ UserID: user.ID, + Action: "billing_checkout_session_created", + Data: map[string]any{"plan_code": req.PlanCode}, + }) writeJSON(w, http.StatusOK, map[string]string{"url": session.URL}) } @@ -313,6 +352,12 @@ func (s *Server) billingPortalSession(w http.ResponseWriter, r *http.Request) { "organization_id": org.ID, }, }) + _ = s.audit.Record(r.Context(), audit.Event{ + OrganizationID: org.ID, + UserID: user.ID, + Action: "billing_portal_session_created", + Data: map[string]any{}, + }) writeJSON(w, http.StatusOK, map[string]string{"url": session.URL}) } @@ -382,3 +427,238 @@ func writeJSON(w http.ResponseWriter, status int, payload any) { w.WriteHeader(status) _ = json.NewEncoder(w).Encode(payload) } + +func (s *Server) auditEvents(w http.ResponseWriter, r *http.Request) { + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + reader, ok := s.audit.(audit.Reader) + if !ok { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "audit_not_configured"}) + return + } + + events, err := reader.ListByOrganization(r.Context(), org.ID, 50) + if err != nil { + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed_to_list_audit_events"}) + return + } + + writeJSON(w, http.StatusOK, map[string]any{"events": events}) +} + +func (s *Server) filesUploadURL(w http.ResponseWriter, r *http.Request) { + if s.files == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "files_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + var req struct { + Filename string `json:"filename"` + ContentType string `json:"contentType"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, 
http.StatusBadRequest, map[string]string{"error": "invalid_request_body"}) + return + } + + resp, err := s.files.CreateUploadURL(r.Context(), files.CreateInput{ + OrganizationID: org.ID, + UploaderUserID: user.ID, + Filename: req.Filename, + ContentType: req.ContentType, + }, requestBaseURL(r)) + if err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_create_upload_url"}) + return + } + + s.analytics.Track(r.Context(), analytics.Event{ + Name: "file_upload_url_created", + DistinctID: user.ID, + Properties: map[string]any{"organization_id": org.ID}, + }) + _ = s.audit.Record(r.Context(), audit.Event{ + OrganizationID: org.ID, + UserID: user.ID, + Action: "file_upload_url_created", + Data: map[string]any{"filename": req.Filename}, + }) + + writeJSON(w, http.StatusOK, resp) +} + +func (s *Server) filesDirectUpload(w http.ResponseWriter, r *http.Request) { + if s.files == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "files_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + fileID := strings.TrimSpace(r.PathValue("id")) + if fileID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_file_id"}) + return + } + + if err := r.ParseMultipartForm(25 * 1024 * 1024); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid_multipart_form"}) + return + } + + f, header, err := r.FormFile("file") + if err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_file"}) + return + } + defer f.Close() + + if err := s.files.HandleDirectUpload(r.Context(), org.ID, fileID, f, header); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_upload_file"}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"status": "uploaded"}) +} + +func (s 
*Server) filesDownload(w http.ResponseWriter, r *http.Request) { + if s.files == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "files_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + fileID := strings.TrimSpace(r.PathValue("id")) + if fileID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_file_id"}) + return + } + + url, err := s.files.GetDownloadURL(r.Context(), org.ID, fileID, requestBaseURL(r)) + if err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_get_download_url"}) + return + } + + // Direct download for local disk provider. + if strings.HasPrefix(url, requestBaseURL(r)+"/api/v1/files/") { + if err := s.files.ServeDirectDownload(w, r, org.ID, fileID); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_download_file"}) + return + } + return + } + + http.Redirect(w, r, url, http.StatusFound) +} + +func (s *Server) filesDownloadURL(w http.ResponseWriter, r *http.Request) { + if s.files == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "files_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + fileID := strings.TrimSpace(r.PathValue("id")) + if fileID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_file_id"}) + return + } + + base := requestBaseURL(r) + url, err := s.files.GetDownloadURL(r.Context(), org.ID, fileID, base) + if err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_get_download_url"}) + return + } + + downloadType := "presigned" + if strings.HasPrefix(url, base+"/api/v1/files/") { + downloadType = "direct" + } + 
+ writeJSON(w, http.StatusOK, map[string]any{"url": url, "downloadType": downloadType}) +} + +func (s *Server) filesComplete(w http.ResponseWriter, r *http.Request) { + if s.files == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "files_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + fileID := strings.TrimSpace(r.PathValue("id")) + if fileID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_file_id"}) + return + } + + var req struct { + SizeBytes int64 `json:"sizeBytes"` + } + _ = json.NewDecoder(r.Body).Decode(&req) + + if err := s.files.MarkUploaded(r.Context(), org.ID, fileID, req.SizeBytes); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_mark_uploaded"}) + return + } + + s.analytics.Track(r.Context(), analytics.Event{ + Name: "file_uploaded", + DistinctID: user.ID, + Properties: map[string]any{"organization_id": org.ID}, + }) + _ = s.audit.Record(r.Context(), audit.Event{ + OrganizationID: org.ID, + UserID: user.ID, + Action: "file_uploaded", + Data: map[string]any{"file_id": fileID, "size_bytes": req.SizeBytes}, + }) + + writeJSON(w, http.StatusOK, map[string]string{"status": "uploaded"}) +} + +func requestBaseURL(r *http.Request) string { + proto := strings.TrimSpace(r.Header.Get("X-Forwarded-Proto")) + if proto == "" { + proto = "http" + } + + host := strings.TrimSpace(r.Header.Get("X-Forwarded-Host")) + if host == "" { + host = r.Host + } + + return proto + "://" + host +} diff --git a/backend/internal/audit/audit.go b/backend/internal/audit/audit.go new file mode 100644 index 0000000..71cec4b --- /dev/null +++ b/backend/internal/audit/audit.go @@ -0,0 +1,33 @@ +package audit + +import "context" + +type Event struct { + OrganizationID string + UserID string + 
Action string + Data map[string]any +} + +type Recorder interface { + Record(ctx context.Context, event Event) error +} + +type Reader interface { + ListByOrganization(ctx context.Context, organizationID string, limit int) ([]EventRecord, error) +} + +type EventRecord struct { + ID string `json:"id"` + OrganizationID string `json:"organizationId"` + UserID string `json:"userId"` + Action string `json:"action"` + Data map[string]any `json:"data"` + CreatedAt string `json:"createdAt"` +} + +type noopRecorder struct{} + +func NewNoop() Recorder { return &noopRecorder{} } + +func (r *noopRecorder) Record(context.Context, Event) error { return nil } diff --git a/backend/internal/audit/db.go b/backend/internal/audit/db.go new file mode 100644 index 0000000..44ce9cb --- /dev/null +++ b/backend/internal/audit/db.go @@ -0,0 +1,100 @@ +package audit + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/jackc/pgx/v5/pgxpool" +) + +type DBRecorder struct { + db *pgxpool.Pool +} + +func NewDBRecorder(db *pgxpool.Pool) *DBRecorder { + return &DBRecorder{db: db} +} + +func (r *DBRecorder) Record(ctx context.Context, event Event) error { + action := strings.TrimSpace(event.Action) + if action == "" { + return fmt.Errorf("missing audit action") + } + + encoded, err := json.Marshal(event.Data) + if err != nil { + return fmt.Errorf("encode audit data: %w", err) + } + + var orgID any + if strings.TrimSpace(event.OrganizationID) != "" { + orgID = strings.TrimSpace(event.OrganizationID) + } + var userID any + if strings.TrimSpace(event.UserID) != "" { + userID = strings.TrimSpace(event.UserID) + } + + _, err = r.db.Exec(ctx, ` + INSERT INTO audit_events (organization_id, user_id, action, data) + VALUES ($1::uuid, $2::uuid, $3, $4::jsonb) + `, orgID, userID, action, string(encoded)) + if err != nil { + return fmt.Errorf("insert audit event: %w", err) + } + return nil +} + +func (r *DBRecorder) ListByOrganization(ctx context.Context, organizationID string, 
limit int) ([]EventRecord, error) { + if strings.TrimSpace(organizationID) == "" { + return nil, fmt.Errorf("missing organizationID") + } + if limit <= 0 || limit > 200 { + limit = 50 + } + + rows, err := r.db.Query(ctx, ` + SELECT id::text, + COALESCE(organization_id::text, ''), + COALESCE(user_id::text, ''), + action, + data::text, + created_at + FROM audit_events + WHERE organization_id = $1 + ORDER BY created_at DESC + LIMIT $2 + `, organizationID, limit) + if err != nil { + return nil, fmt.Errorf("query audit events: %w", err) + } + defer rows.Close() + + records := []EventRecord{} + for rows.Next() { + var rec EventRecord + var dataText string + var created time.Time + if err := rows.Scan(&rec.ID, &rec.OrganizationID, &rec.UserID, &rec.Action, &dataText, &created); err != nil { + return nil, fmt.Errorf("scan audit event: %w", err) + } + rec.CreatedAt = created.UTC().Format(time.RFC3339) + + var parsed map[string]any + if err := json.Unmarshal([]byte(dataText), &parsed); err == nil && parsed != nil { + rec.Data = parsed + } else { + rec.Data = map[string]any{} + } + + records = append(records, rec) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterate audit events: %w", err) + } + + return records, nil +} diff --git a/backend/internal/auth/auth.go b/backend/internal/auth/auth.go index c8a4276..1bc156d 100644 --- a/backend/internal/auth/auth.go +++ b/backend/internal/auth/auth.go @@ -6,8 +6,11 @@ import ( "fmt" "net/http" "strings" + "time" "github.com/jackc/pgx/v5/pgxpool" + "saas-core-template/backend/internal/audit" + "saas-core-template/backend/internal/jobs" ) var ( @@ -30,6 +33,8 @@ type Provider interface { type Service struct { provider Provider db *pgxpool.Pool + jobs jobs.Enqueuer + audit audit.Recorder } type User struct { @@ -44,10 +49,34 @@ type Organization struct { Role string `json:"role"` } -func NewService(provider Provider, db *pgxpool.Pool) *Service { - return &Service{ +func NewService(provider Provider, db 
*pgxpool.Pool, opts ...func(*Service)) *Service { + svc := &Service{ provider: provider, db: db, + jobs: nil, + audit: audit.NewNoop(), + } + + for _, opt := range opts { + if opt != nil { + opt(svc) + } + } + + return svc +} + +func WithJobs(enqueuer jobs.Enqueuer) func(*Service) { + return func(s *Service) { + s.jobs = enqueuer + } +} + +func WithAudit(recorder audit.Recorder) func(*Service) { + return func(s *Service) { + if recorder != nil { + s.audit = recorder + } } } @@ -71,7 +100,7 @@ func (s *Service) Authenticate(ctx context.Context, token string) (User, error) return User{}, fmt.Errorf("verify token: %w", err) } - userID, err := s.ensureUserIdentity(ctx, principal) + userID, created, err := s.ensureUserIdentity(ctx, principal) if err != nil { return User{}, err } @@ -85,13 +114,30 @@ func (s *Service) Authenticate(ctx context.Context, token string) (User, error) return User{}, err } + if created { + _ = s.audit.Record(ctx, audit.Event{ + UserID: user.ID, + Action: "user_created", + Data: map[string]any{"primary_email": user.PrimaryEmail, "provider": principal.Provider}, + }) + + if s.jobs != nil && strings.TrimSpace(user.PrimaryEmail) != "" { + _, _ = s.jobs.Enqueue(ctx, "send_email", map[string]any{ + "kind": "welcome", + "to": user.PrimaryEmail, + "subject": "Welcome", + "text": "Welcome to the app. 
You're set up and ready to go.", + }, time.Now().UTC()) + } + } + return user, nil } -func (s *Service) ensureUserIdentity(ctx context.Context, principal VerifiedPrincipal) (string, error) { +func (s *Service) ensureUserIdentity(ctx context.Context, principal VerifiedPrincipal) (string, bool, error) { tx, err := s.db.Begin(ctx) if err != nil { - return "", fmt.Errorf("begin tx: %w", err) + return "", false, fmt.Errorf("begin tx: %w", err) } defer tx.Rollback(ctx) @@ -108,21 +154,21 @@ func (s *Service) ensureUserIdentity(ctx context.Context, principal VerifiedPrin email_verified_at = CASE WHEN $2 THEN now() ELSE email_verified_at END, updated_at = now() WHERE provider = $3 AND provider_user_id = $4 - `, emptyToNil(principal.PrimaryEmail), principal.EmailVerified, principal.Provider, principal.ProviderUserID); err != nil { - return "", fmt.Errorf("update identity: %w", err) + `, emptyToNil(principal.PrimaryEmail), principal.EmailVerified, principal.Provider, principal.ProviderUserID); err != nil { + return "", false, fmt.Errorf("update identity: %w", err) } if principal.PrimaryEmail != "" { if _, err := tx.Exec(ctx, `UPDATE users SET primary_email = $1, updated_at = now() WHERE id = $2`, principal.PrimaryEmail, userID); err != nil { - return "", fmt.Errorf("update user email: %w", err) + return "", false, fmt.Errorf("update user email: %w", err) } } if err := tx.Commit(ctx); err != nil { - return "", fmt.Errorf("commit existing identity: %w", err) + return "", false, fmt.Errorf("commit existing identity: %w", err) } - return userID, nil + return userID, false, nil } // Create new user and identity mapping when no existing identity is found. 
@@ -131,21 +177,21 @@ func (s *Service) ensureUserIdentity(ctx context.Context, principal VerifiedPrin VALUES ($1) RETURNING id::text `, emptyToNil(principal.PrimaryEmail)).Scan(&userID); err != nil { - return "", fmt.Errorf("insert user: %w", err) + return "", false, fmt.Errorf("insert user: %w", err) } if _, err := tx.Exec(ctx, ` INSERT INTO auth_identities (user_id, provider, provider_user_id, provider_email, email_verified_at) VALUES ($1, $2, $3, $4, CASE WHEN $5 THEN now() ELSE NULL END) `, userID, principal.Provider, principal.ProviderUserID, emptyToNil(principal.PrimaryEmail), principal.EmailVerified); err != nil { - return "", fmt.Errorf("insert identity: %w", err) + return "", false, fmt.Errorf("insert identity: %w", err) } if err := tx.Commit(ctx); err != nil { - return "", fmt.Errorf("commit new identity: %w", err) + return "", false, fmt.Errorf("commit new identity: %w", err) } - return userID, nil + return userID, true, nil } func (s *Service) ResolveOrganization(ctx context.Context, userID string, requestedOrgID string) (Organization, error) { diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index e305413..69fb50b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -3,6 +3,8 @@ package config import ( "fmt" "os" + "strconv" + "time" ) type Config struct { @@ -25,6 +27,23 @@ type Config struct { PostHogProjectKey string PostHogHost string + EmailProvider string + EmailFrom string + ResendAPIKey string + + JobsEnabled bool + JobsWorkerID string + JobsPollInterval time.Duration + + FileStorageProvider string + FileStorageDiskPath string + S3Bucket string + S3Region string + S3Endpoint string + S3AccessKeyID string + S3SecretAccessKey string + S3ForcePathStyle bool + ClerkSecretKey string ClerkAPIURL string StripeSecretKey string @@ -56,6 +75,23 @@ func Load() (Config, error) { PostHogProjectKey: os.Getenv("POSTHOG_PROJECT_KEY"), PostHogHost: getEnv("POSTHOG_HOST", 
"https://app.posthog.com"), + EmailProvider: getEnv("EMAIL_PROVIDER", "console"), + EmailFrom: getEnv("EMAIL_FROM", ""), + ResendAPIKey: os.Getenv("RESEND_API_KEY"), + + JobsEnabled: getEnvBool("JOBS_ENABLED", true), + JobsWorkerID: getEnv("JOBS_WORKER_ID", "local"), + JobsPollInterval: getEnvDuration("JOBS_POLL_INTERVAL", 1*time.Second), + + FileStorageProvider: getEnv("FILE_STORAGE_PROVIDER", "disk"), + FileStorageDiskPath: getEnv("FILE_STORAGE_DISK_PATH", "./.data/uploads"), + S3Bucket: getEnv("S3_BUCKET", ""), + S3Region: getEnv("S3_REGION", "auto"), + S3Endpoint: getEnv("S3_ENDPOINT", ""), + S3AccessKeyID: getEnv("S3_ACCESS_KEY_ID", ""), + S3SecretAccessKey: getEnv("S3_SECRET_ACCESS_KEY", ""), + S3ForcePathStyle: getEnvBool("S3_FORCE_PATH_STYLE", true), + ClerkSecretKey: os.Getenv("CLERK_SECRET_KEY"), ClerkAPIURL: getEnv("CLERK_API_URL", ""), StripeSecretKey: os.Getenv("STRIPE_SECRET_KEY"), @@ -85,3 +121,27 @@ func getEnv(key string, fallback string) string { return value } + +func getEnvBool(key string, fallback bool) bool { + value := os.Getenv(key) + if value == "" { + return fallback + } + parsed, err := strconv.ParseBool(value) + if err != nil { + return fallback + } + return parsed +} + +func getEnvDuration(key string, fallback time.Duration) time.Duration { + value := os.Getenv(key) + if value == "" { + return fallback + } + parsed, err := time.ParseDuration(value) + if err != nil { + return fallback + } + return parsed +} diff --git a/backend/internal/email/email.go b/backend/internal/email/email.go new file mode 100644 index 0000000..d38d0c5 --- /dev/null +++ b/backend/internal/email/email.go @@ -0,0 +1,51 @@ +package email + +import ( + "context" + "fmt" + "log/slog" + "strings" +) + +type Message struct { + To string + From string + Subject string + Text string + HTML string +} + +type Sender interface { + Send(ctx context.Context, msg Message) error +} + +type noopSender struct{} + +func NewNoop() Sender { return &noopSender{} } + +func (s 
*noopSender) Send(context.Context, Message) error { return nil } + +type ConsoleSender struct{} + +func NewConsole() Sender { return &ConsoleSender{} } + +func (s *ConsoleSender) Send(_ context.Context, msg Message) error { + slog.Info("email send", "to", msg.To, "from", msg.From, "subject", msg.Subject, "text_len", len(msg.Text), "html_len", len(msg.HTML)) + return nil +} + +func ValidateMessage(msg Message) error { + if strings.TrimSpace(msg.To) == "" { + return fmt.Errorf("missing To") + } + if strings.TrimSpace(msg.From) == "" { + return fmt.Errorf("missing From") + } + if strings.TrimSpace(msg.Subject) == "" { + return fmt.Errorf("missing Subject") + } + if strings.TrimSpace(msg.Text) == "" && strings.TrimSpace(msg.HTML) == "" { + return fmt.Errorf("missing body") + } + return nil +} diff --git a/backend/internal/email/resend.go b/backend/internal/email/resend.go new file mode 100644 index 0000000..0671044 --- /dev/null +++ b/backend/internal/email/resend.go @@ -0,0 +1,66 @@ +package email + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" +) + +type ResendSender struct { + apiKey string + client *http.Client +} + +func NewResend(apiKey string) *ResendSender { + return &ResendSender{ + apiKey: strings.TrimSpace(apiKey), + client: &http.Client{Timeout: 10 * time.Second}, + } +} + +func (s *ResendSender) Send(ctx context.Context, msg Message) error { + if err := ValidateMessage(msg); err != nil { + return err + } + if s.apiKey == "" { + return fmt.Errorf("missing Resend API key") + } + + payload := map[string]any{ + "from": msg.From, + "to": []string{msg.To}, + "subject": msg.Subject, + } + if strings.TrimSpace(msg.HTML) != "" { + payload["html"] = msg.HTML + } + if strings.TrimSpace(msg.Text) != "" { + payload["text"] = msg.Text + } + + body, _ := json.Marshal(payload) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://api.resend.com/emails", bytes.NewReader(body)) + if err != nil { + return 
fmt.Errorf("build resend request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+s.apiKey) + req.Header.Set("Content-Type", "application/json") + + resp, err := s.client.Do(req) + if err != nil { + return fmt.Errorf("call resend: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048)) + return fmt.Errorf("resend status %d: %s", resp.StatusCode, strings.TrimSpace(string(b))) + } + + return nil +} diff --git a/backend/internal/files/files.go b/backend/internal/files/files.go new file mode 100644 index 0000000..85463b4 --- /dev/null +++ b/backend/internal/files/files.go @@ -0,0 +1,257 @@ +package files + +import ( + "context" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/jackc/pgx/v5/pgxpool" +) + +type UploadURLResponse struct { + FileID string `json:"fileId"` + Method string `json:"method"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + UploadType string `json:"uploadType"` // "direct" or "presigned" +} + +type Service struct { + db *pgxpool.Pool + + provider string + diskPath string + + s3 *S3Provider +} + +type Config struct { + Provider string + DiskPath string + S3 *S3Provider +} + +func NewService(db *pgxpool.Pool, cfg Config) *Service { + return &Service{ + db: db, + provider: strings.ToLower(strings.TrimSpace(defaultString(cfg.Provider, "disk"))), + diskPath: strings.TrimSpace(defaultString(cfg.DiskPath, "./.data/uploads")), + s3: cfg.S3, + } +} + +type CreateInput struct { + OrganizationID string + UploaderUserID string + Filename string + ContentType string +} + +func (s *Service) CreateUploadURL(ctx context.Context, input CreateInput, apiBaseURL string) (UploadURLResponse, error) { + filename := strings.TrimSpace(input.Filename) + contentType := strings.TrimSpace(input.ContentType) + if filename == "" { + return UploadURLResponse{}, fmt.Errorf("filename is 
required") + } + if contentType == "" { + contentType = "application/octet-stream" + } + + provider := s.provider + if provider == "" { + provider = "disk" + } + + var id string + storageKey := buildStorageKey(input.OrganizationID, filename) + if err := s.db.QueryRow(ctx, ` + INSERT INTO file_objects (organization_id, uploader_user_id, filename, content_type, provider, storage_key, status) + VALUES ($1::uuid, NULLIF($2, '')::uuid, $3, $4, $5, $6, 'pending') + RETURNING id::text + `, input.OrganizationID, input.UploaderUserID, filename, contentType, provider, storageKey).Scan(&id); err != nil { + return UploadURLResponse{}, fmt.Errorf("insert file object: %w", err) + } + + switch provider { + case "disk": + return UploadURLResponse{ + FileID: id, + Method: http.MethodPost, + URL: strings.TrimRight(apiBaseURL, "/") + "/api/v1/files/" + id + "/upload", + Headers: map[string]string{}, + UploadType: "direct", + }, nil + case "s3": + if s.s3 == nil { + return UploadURLResponse{}, fmt.Errorf("s3 not configured") + } + + url, headers, err := s.s3.PresignPut(ctx, storageKey, contentType, 10*time.Minute) + if err != nil { + return UploadURLResponse{}, err + } + + return UploadURLResponse{ + FileID: id, + Method: http.MethodPut, + URL: url, + Headers: headers, + UploadType: "presigned", + }, nil + default: + return UploadURLResponse{}, fmt.Errorf("unknown FILE_STORAGE_PROVIDER %q (expected disk|s3)", provider) + } +} + +func (s *Service) HandleDirectUpload(ctx context.Context, organizationID string, fileID string, file multipart.File, header *multipart.FileHeader) error { + record, err := s.getFile(ctx, organizationID, fileID) + if err != nil { + return err + } + if record.Provider != "disk" { + return fmt.Errorf("direct upload not supported for provider %q", record.Provider) + } + + if err := os.MkdirAll(s.diskPath, 0o755); err != nil { + return fmt.Errorf("create upload dir: %w", err) + } + + targetPath := filepath.Join(s.diskPath, filepath.FromSlash(record.StorageKey)) + 
if err := os.MkdirAll(filepath.Dir(targetPath), 0o755); err != nil { + return fmt.Errorf("create upload path: %w", err) + } + + tmpPath := targetPath + ".tmp" + out, err := os.Create(tmpPath) + if err != nil { + return fmt.Errorf("create file: %w", err) + } + defer out.Close() + + written, err := io.Copy(out, io.LimitReader(file, 25*1024*1024)) + if err != nil { + return fmt.Errorf("write file: %w", err) + } + if err := out.Close(); err != nil { + return fmt.Errorf("close file: %w", err) + } + if err := os.Rename(tmpPath, targetPath); err != nil { + return fmt.Errorf("commit file: %w", err) + } + + _, err = s.db.Exec(ctx, ` + UPDATE file_objects + SET status = 'uploaded', + size_bytes = $1, + updated_at = now() + WHERE id = $2 AND organization_id = $3 + `, written, fileID, organizationID) + if err != nil { + return fmt.Errorf("update file status: %w", err) + } + + _ = header + return nil +} + +func (s *Service) MarkUploaded(ctx context.Context, organizationID string, fileID string, sizeBytes int64) error { + // Allow marking s3 uploads as complete after a successful presigned PUT. 
+ _, err := s.db.Exec(ctx, ` + UPDATE file_objects + SET status = 'uploaded', + size_bytes = NULLIF($1, 0), + updated_at = now() + WHERE id = $2 AND organization_id = $3 + `, sizeBytes, fileID, organizationID) + if err != nil { + return fmt.Errorf("mark uploaded: %w", err) + } + return nil +} + +func (s *Service) GetDownloadURL(ctx context.Context, organizationID string, fileID string, apiBaseURL string) (string, error) { + record, err := s.getFile(ctx, organizationID, fileID) + if err != nil { + return "", err + } + + switch record.Provider { + case "disk": + return strings.TrimRight(apiBaseURL, "/") + "/api/v1/files/" + fileID + "/download", nil + case "s3": + if s.s3 == nil { + return "", fmt.Errorf("s3 not configured") + } + url, err := s.s3.PresignGet(ctx, record.StorageKey, 10*time.Minute) + if err != nil { + return "", err + } + return url, nil + default: + return "", fmt.Errorf("unknown provider %q", record.Provider) + } +} + +func (s *Service) ServeDirectDownload(w http.ResponseWriter, r *http.Request, organizationID string, fileID string) error { + record, err := s.getFile(r.Context(), organizationID, fileID) + if err != nil { + return err + } + if record.Provider != "disk" { + return fmt.Errorf("direct download not supported for provider %q", record.Provider) + } + + path := filepath.Join(s.diskPath, filepath.FromSlash(record.StorageKey)) + w.Header().Set("Content-Type", record.ContentType) + w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", record.Filename)) + http.ServeFile(w, r, path) + return nil +} + +type fileRecord struct { + ID string + Provider string + StorageKey string + Filename string + ContentType string +} + +func (s *Service) getFile(ctx context.Context, organizationID string, fileID string) (fileRecord, error) { + var rec fileRecord + if err := s.db.QueryRow(ctx, ` + SELECT id::text, provider, storage_key, filename, content_type + FROM file_objects + WHERE id = $1 AND organization_id = $2 + `, fileID, 
organizationID).Scan(&rec.ID, &rec.Provider, &rec.StorageKey, &rec.Filename, &rec.ContentType); err != nil { + return fileRecord{}, fmt.Errorf("file not found") + } + rec.Provider = strings.ToLower(strings.TrimSpace(rec.Provider)) + return rec, nil +} + +func buildStorageKey(organizationID string, filename string) string { + // Use a stable prefix per org and keep filename sanitized for readability. + clean := strings.TrimSpace(filename) + clean = strings.ReplaceAll(clean, "..", "_") + clean = strings.ReplaceAll(clean, "/", "_") + clean = strings.ReplaceAll(clean, "\\", "_") + if clean == "" { + clean = "upload.bin" + } + + return path.Join("org", strings.TrimSpace(organizationID), fmt.Sprintf("%d-%s", time.Now().UTC().UnixNano(), clean)) +} + +func defaultString(value string, fallback string) string { + if strings.TrimSpace(value) == "" { + return fallback + } + return strings.TrimSpace(value) +} diff --git a/backend/internal/files/s3.go b/backend/internal/files/s3.go new file mode 100644 index 0000000..d973f97 --- /dev/null +++ b/backend/internal/files/s3.go @@ -0,0 +1,108 @@ +package files + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +type S3Provider struct { + bucket string + client *s3.Client + presign *s3.PresignClient +} + +type S3Config struct { + Bucket string + Region string + Endpoint string + AccessKeyID string + SecretAccessKey string + ForcePathStyle bool +} + +func NewS3Provider(ctx context.Context, cfg S3Config) (*S3Provider, error) { + bucket := strings.TrimSpace(cfg.Bucket) + if bucket == "" { + return nil, fmt.Errorf("missing S3_BUCKET") + } + + loadOpts := []func(*config.LoadOptions) error{ + config.WithRegion(strings.TrimSpace(defaultString(cfg.Region, "auto"))), + } + + if strings.TrimSpace(cfg.AccessKeyID) != "" && strings.TrimSpace(cfg.SecretAccessKey) != "" { + 
loadOpts = append(loadOpts, config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + strings.TrimSpace(cfg.AccessKeyID), + strings.TrimSpace(cfg.SecretAccessKey), + "", + ))) + } + + awsCfg, err := config.LoadDefaultConfig(ctx, loadOpts...) + if err != nil { + return nil, fmt.Errorf("load aws config: %w", err) + } + + clientOpts := []func(*s3.Options){} + if endpoint := strings.TrimSpace(cfg.Endpoint); endpoint != "" { + clientOpts = append(clientOpts, func(o *s3.Options) { + o.BaseEndpoint = aws.String(endpoint) + }) + } + if cfg.ForcePathStyle { + clientOpts = append(clientOpts, func(o *s3.Options) { + o.UsePathStyle = true + }) + } + + s3Client := s3.NewFromConfig(awsCfg, clientOpts...) + p := s3.NewPresignClient(s3Client) + + return &S3Provider{bucket: bucket, client: s3Client, presign: p}, nil +} + +func (p *S3Provider) PresignPut(ctx context.Context, key string, contentType string, ttl time.Duration) (string, map[string]string, error) { + if strings.TrimSpace(key) == "" { + return "", nil, fmt.Errorf("missing key") + } + if ttl <= 0 { + ttl = 10 * time.Minute + } + + out, err := p.presign.PresignPutObject(ctx, &s3.PutObjectInput{ + Bucket: aws.String(p.bucket), + Key: aws.String(key), + ContentType: aws.String(contentType), + }, s3.WithPresignExpires(ttl)) + if err != nil { + return "", nil, fmt.Errorf("presign put: %w", err) + } + + return out.URL, map[string]string{"Content-Type": contentType}, nil +} + +func (p *S3Provider) PresignGet(ctx context.Context, key string, ttl time.Duration) (string, error) { + if strings.TrimSpace(key) == "" { + return "", fmt.Errorf("missing key") + } + if ttl <= 0 { + ttl = 10 * time.Minute + } + + out, err := p.presign.PresignGetObject(ctx, &s3.GetObjectInput{ + Bucket: aws.String(p.bucket), + Key: aws.String(key), + }, s3.WithPresignExpires(ttl)) + if err != nil { + return "", fmt.Errorf("presign get: %w", err) + } + + return out.URL, nil +} diff --git a/backend/internal/jobs/jobs.go 
b/backend/internal/jobs/jobs.go new file mode 100644 index 0000000..306a7eb --- /dev/null +++ b/backend/internal/jobs/jobs.go @@ -0,0 +1,192 @@ +package jobs + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +type Enqueuer interface { + Enqueue(ctx context.Context, jobType string, payload any, runAt time.Time) (string, error) +} + +type Store struct { + db *pgxpool.Pool +} + +func NewStore(db *pgxpool.Pool) *Store { + return &Store{db: db} +} + +func (s *Store) Enqueue(ctx context.Context, jobType string, payload any, runAt time.Time) (string, error) { + encoded, err := json.Marshal(payload) + if err != nil { + return "", fmt.Errorf("marshal job payload: %w", err) + } + + var id string + if err := s.db.QueryRow(ctx, ` + INSERT INTO jobs (type, payload, status, run_at) + VALUES ($1, $2::jsonb, 'queued', $3) + RETURNING id::text + `, jobType, string(encoded), runAt.UTC()).Scan(&id); err != nil { + return "", fmt.Errorf("insert job: %w", err) + } + + return id, nil +} + +type Job struct { + ID string + Type string + PayloadJSON []byte + Attempts int + MaxAttempts int +} + +type Claimer struct { + db *pgxpool.Pool + workerID string + lockTTL time.Duration + maxJitter time.Duration +} + +type ClaimerConfig struct { + WorkerID string + LockTTL time.Duration +} + +func NewClaimer(db *pgxpool.Pool, cfg ClaimerConfig) *Claimer { + lockTTL := cfg.LockTTL + if lockTTL <= 0 { + lockTTL = 5 * time.Minute + } + + return &Claimer{ + db: db, + workerID: cfg.WorkerID, + lockTTL: lockTTL, + } +} + +func (c *Claimer) ClaimNext(ctx context.Context) (*Job, error) { + tx, err := c.db.Begin(ctx) + if err != nil { + return nil, fmt.Errorf("begin claim tx: %w", err) + } + defer tx.Rollback(ctx) + + var job Job + err = tx.QueryRow(ctx, ` + WITH next_job AS ( + SELECT id + FROM jobs + WHERE status = 'queued' + AND run_at <= now() + AND (locked_until IS NULL OR locked_until < now()) + ORDER BY run_at 
ASC, created_at ASC + LIMIT 1 + FOR UPDATE SKIP LOCKED + ) + UPDATE jobs + SET status = 'processing', + attempts = attempts + 1, + locked_until = now() + ($1::int * interval '1 second'), + locked_by = $2, + updated_at = now() + WHERE id IN (SELECT id FROM next_job) + RETURNING id::text, type, payload::text, attempts, max_attempts + `, int(c.lockTTL.Seconds()), c.workerID).Scan(&job.ID, &job.Type, &job.PayloadJSON, &job.Attempts, &job.MaxAttempts) + + if errors.Is(err, pgx.ErrNoRows) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("claim job: %w", err) + } + + if err := tx.Commit(ctx); err != nil { + return nil, fmt.Errorf("commit claim tx: %w", err) + } + + return &job, nil +} + +func (c *Claimer) Complete(ctx context.Context, jobID string) error { + _, err := c.db.Exec(ctx, ` + UPDATE jobs + SET status = 'done', + locked_until = NULL, + locked_by = NULL, + updated_at = now() + WHERE id = $1 + `, jobID) + if err != nil { + return fmt.Errorf("complete job: %w", err) + } + return nil +} + +type FailureInput struct { + JobID string + Attempts int + MaxAttempts int + Err error +} + +func (c *Claimer) Fail(ctx context.Context, input FailureInput) error { + status := "queued" + nextRunAt := time.Now().UTC().Add(backoff(input.Attempts)) + if input.Attempts >= input.MaxAttempts { + status = "failed" + nextRunAt = time.Now().UTC() + } + + lastErr := "" + if input.Err != nil { + lastErr = input.Err.Error() + if len(lastErr) > 2000 { + lastErr = lastErr[:2000] + } + } + + _, err := c.db.Exec(ctx, ` + UPDATE jobs + SET status = $1, + run_at = $2, + locked_until = NULL, + locked_by = NULL, + last_error = $3, + updated_at = now() + WHERE id = $4 + `, status, nextRunAt, lastErr, input.JobID) + if err != nil { + return fmt.Errorf("fail job: %w", err) + } + return nil +} + +func backoff(attempt int) time.Duration { + // attempt is 1-based here, because we increment attempts on claim. 
+ if attempt <= 1 { + return 5 * time.Second + } + + delay := time.Duration(1<<min(attempt, 10)) * time.Second + if delay > 10*time.Minute { + delay = 10 * time.Minute + } + return delay +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/backend/migrations/0002_jobs_audit_files.down.sql b/backend/migrations/0002_jobs_audit_files.down.sql new file mode 100644 index 0000000..1eff409 --- /dev/null +++ b/backend/migrations/0002_jobs_audit_files.down.sql @@ -0,0 +1,11 @@ +DROP INDEX IF EXISTS idx_file_objects_org_created_at; +DROP TABLE IF EXISTS file_objects; + +DROP INDEX IF EXISTS idx_audit_events_user_created_at; +DROP INDEX IF EXISTS idx_audit_events_org_created_at; +DROP TABLE IF EXISTS audit_events; + +DROP INDEX IF EXISTS idx_jobs_locked_until; +DROP INDEX IF EXISTS idx_jobs_status_run_at; +DROP TABLE IF EXISTS jobs; + diff --git a/backend/migrations/0002_jobs_audit_files.up.sql b/backend/migrations/0002_jobs_audit_files.up.sql new file mode 100644 index 0000000..cee6a06 --- /dev/null +++ b/backend/migrations/0002_jobs_audit_files.up.sql @@ -0,0 +1,46 @@ +CREATE TABLE IF NOT EXISTS jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + type TEXT NOT NULL, + payload JSONB NOT NULL DEFAULT '{}'::jsonb, + status TEXT NOT NULL DEFAULT 'queued', + run_at TIMESTAMPTZ NOT NULL DEFAULT now(), + attempts INTEGER NOT NULL DEFAULT 0, + max_attempts INTEGER NOT NULL DEFAULT 10, + locked_until TIMESTAMPTZ, + locked_by TEXT, + last_error TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_jobs_status_run_at ON jobs(status, run_at); +CREATE INDEX IF NOT EXISTS idx_jobs_locked_until ON jobs(locked_until); + +CREATE TABLE IF NOT EXISTS audit_events ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + organization_id UUID REFERENCES organizations(id) ON DELETE CASCADE, + user_id UUID REFERENCES users(id) ON DELETE SET NULL, + action TEXT NOT NULL, + data JSONB NOT NULL DEFAULT '{}'::jsonb, + 
created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_audit_events_org_created_at ON audit_events(organization_id, created_at DESC); +CREATE INDEX IF NOT EXISTS idx_audit_events_user_created_at ON audit_events(user_id, created_at DESC); + +CREATE TABLE IF NOT EXISTS file_objects ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + organization_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE, + uploader_user_id UUID REFERENCES users(id) ON DELETE SET NULL, + filename TEXT NOT NULL, + content_type TEXT NOT NULL, + size_bytes BIGINT, + provider TEXT NOT NULL DEFAULT 'disk', + storage_key TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_file_objects_org_created_at ON file_objects(organization_id, created_at DESC); + diff --git a/docs/README.md b/docs/README.md index d20830e..0614b27 100644 --- a/docs/README.md +++ b/docs/README.md @@ -32,6 +32,14 @@ This directory contains implementation playbooks for contributors and AI agents. - Baseline controls and evidence expectations. - [Production Setup Checklist](operations/production-setup-checklist.md) - End-to-end deployment wiring (Render + Vercel + providers). +- [Background Jobs](operations/background-jobs.md) + - Postgres-backed job queue and worker process. +- [Email](operations/email.md) + - Transactional email adapter and Resend configuration. +- [File Uploads](operations/file-uploads.md) + - Disk and S3/R2 upload configurations. +- [Audit Logs](operations/audit-logs.md) + - Audit events table and API. - [Observability (OpenTelemetry)](operations/observability.md) - Local tracing collector and production export configuration. 
- [Product Analytics (PostHog)](operations/product-analytics.md) diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index 6218528..d8f9c6f 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -41,6 +41,7 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - Confirm no secrets were committed while adding integration variables. - Confirm i18n defaults render correctly (language switcher changes locale without breaking SSR pages). - For UI work, prefer shadcn/ui primitives in `frontend/components/ui/` and avoid one-off styling. + - If adding background jobs/email, verify local worker runs (`make dev-worker`) and production checklist is updated. ## 5) Documentation and traceability diff --git a/docs/operations/audit-logs.md b/docs/operations/audit-logs.md new file mode 100644 index 0000000..9c9f6b2 --- /dev/null +++ b/docs/operations/audit-logs.md @@ -0,0 +1,20 @@ +# Audit Logs + +This template includes an `audit_events` table for security-relevant and business-relevant actions. + +## Local development + +Audit events are written to Postgres. + +## API + +- `GET /api/v1/audit/events` (org-scoped) returns recent audit events for the active organization. + +## What should be audited + +At minimum: + +- Identity and access changes (sign-in, role changes, invites) +- Billing actions (checkout/portal sessions, subscription changes) +- File uploads and sensitive operations + diff --git a/docs/operations/background-jobs.md b/docs/operations/background-jobs.md new file mode 100644 index 0000000..6de40df --- /dev/null +++ b/docs/operations/background-jobs.md @@ -0,0 +1,24 @@ +# Background Jobs + +This template includes a minimal Postgres-backed job queue and a separate worker process. + +## Local development + +1. Start infra: `make infra-up` +2. Run API: `make dev-api` +3. 
Run worker in another terminal: `make dev-worker` + +Jobs are stored in the `jobs` table and claimed with `FOR UPDATE SKIP LOCKED`. + +## Configuration + +Backend env vars: + +- `JOBS_ENABLED=true|false` +- `JOBS_WORKER_ID=` (worker identity) +- `JOBS_POLL_INTERVAL=1s` (poll interval) + +## Current job types + +- `send_email`: sends a transactional email using the configured email provider. + diff --git a/docs/operations/email.md b/docs/operations/email.md new file mode 100644 index 0000000..e5e5709 --- /dev/null +++ b/docs/operations/email.md @@ -0,0 +1,25 @@ +# Email + +This template supports sending transactional emails via a provider adapter. + +## Local development + +Defaults are local-first: + +- `EMAIL_PROVIDER=console` logs emails instead of sending. + +## Production (Resend) + +Set backend env vars: + +- `EMAIL_PROVIDER=resend` +- `EMAIL_FROM="Your Name <you@example.com>"` +- `RESEND_API_KEY=` + +Email sending is executed via background jobs (see `background-jobs.md`). + +## Provider boundaries + +- Keep provider-specific API calls inside `backend/internal/email/*`. +- Use jobs to ensure email sending is retryable and non-blocking. + diff --git a/docs/operations/file-uploads.md b/docs/operations/file-uploads.md new file mode 100644 index 0000000..36f8c8d --- /dev/null +++ b/docs/operations/file-uploads.md @@ -0,0 +1,30 @@ +# File Uploads + +This template supports tenant-scoped file uploads via an adapter boundary. 
+ +## Local development (disk) + +Defaults: + +- `FILE_STORAGE_PROVIDER=disk` +- `FILE_STORAGE_DISK_PATH=./.data/uploads` (relative to `backend/` when running `make dev-api`) + +Uploads use a direct API endpoint: + +- `POST /api/v1/files/upload-url` → returns an upload URL +- `POST /api/v1/files/{id}/upload` → multipart upload (disk provider) +- `POST /api/v1/files/{id}/complete` → marks presigned uploads complete (S3/R2) + +## Production (S3 / R2) + +Set: + +- `FILE_STORAGE_PROVIDER=s3` +- `S3_BUCKET=` +- `S3_REGION=` (R2 uses `auto`) +- `S3_ENDPOINT=` (required for R2) +- `S3_ACCESS_KEY_ID` +- `S3_SECRET_ACCESS_KEY` +- `S3_FORCE_PATH_STYLE=true` (usually required for R2) + +Uploads use presigned PUT URLs, so the frontend uploads directly to object storage. diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md index 4715eef..28d1f54 100644 --- a/docs/operations/production-setup-checklist.md +++ b/docs/operations/production-setup-checklist.md @@ -3,9 +3,9 @@ This checklist wires the template end-to-end in production with: - Frontend on Vercel -- Backend + Postgres on Render +- Backend + Worker + Postgres on Render - Redis on Upstash -- Providers: Clerk (auth), Stripe (billing), PostHog (analytics), Sentry (error reporting), Crisp (support), Grafana Cloud (telemetry) +- Providers: Clerk (auth), Stripe (billing), Resend (email), PostHog (analytics), Sentry (error reporting), Crisp (support), Grafana Cloud (telemetry) Local development should continue to work with console/noop defaults and Docker Compose infra. @@ -13,13 +13,14 @@ Local development should continue to work with console/noop defaults and Docker - Clerk: create an application. - Stripe: create an account and products/prices. +- Resend: create an account, verify domain/sender, create API key. - Upstash: create a Redis database. - PostHog: create a project. - Sentry: create a project (frontend + backend can share or be separate). 
- Crisp: create a website. - Grafana Cloud: create a stack with OTLP endpoint + API token. -## 1) Deploy backend + Postgres (Render) +## 1) Deploy backend + worker + Postgres (Render) 1. In Render, create services from `render.yaml`. 2. Confirm backend service is reachable (Render service URL): @@ -58,10 +59,36 @@ Set these in the backend service: - `ANALYTICS_PROVIDER=posthog` - `POSTHOG_PROJECT_KEY` - `POSTHOG_HOST=https://app.posthog.com` (or your host) +- File uploads (S3 / R2) + - `FILE_STORAGE_PROVIDER=s3` + - `S3_BUCKET` + - `S3_REGION` (R2 uses `auto`) + - `S3_ENDPOINT` (R2 required) + - `S3_ACCESS_KEY_ID` + - `S3_SECRET_ACCESS_KEY` + - `S3_FORCE_PATH_STYLE=true` + +### Worker env vars (Render) + +The worker service runs background jobs (emails, future async tasks). Configure: + +- Jobs + - `JOBS_ENABLED=true` + - `JOBS_WORKER_ID=render` + - `JOBS_POLL_INTERVAL=1s` +- Email (Resend) + - `EMAIL_PROVIDER=resend` + - `EMAIL_FROM=` + - `RESEND_API_KEY=` +- Observability / error reporting + - `OTEL_*` and `SENTRY_*` as above ### Database migration (Render Postgres) -Apply `backend/migrations/0001_identity_tenancy_billing.up.sql` against the Render Postgres database before using auth/billing endpoints. +Apply migrations in order against Render Postgres before using auth/billing/files endpoints: + +- `backend/migrations/0001_identity_tenancy_billing.up.sql` +- `backend/migrations/0002_jobs_audit_files.up.sql` ## 2) Deploy frontend (Vercel) @@ -134,4 +161,6 @@ Apply `backend/migrations/0001_identity_tenancy_billing.up.sql` against the Rend - Sentry captures test error (optional). - Crisp widget loads (optional). - Grafana Cloud receives traces (optional). - +6. Background jobs: + - Trigger a new-user sign-in to enqueue a welcome email job. + - Confirm worker processes the job (Resend in prod, console locally). 
diff --git a/docs/overview.md b/docs/overview.md index 863183e..3ce5660 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -26,10 +26,15 @@ This doc is a “start here” guide for understanding the template quickly. - `backend/` - `cmd/api/main.go`: composition root (config, DB/Redis, providers, server start). + - `cmd/worker/main.go`: background worker (jobs, email sending). - `internal/api/`: HTTP transport (routes, middleware, JSON responses). - `internal/auth/`: auth provider adapter + identity mapping + org resolution. - `internal/billing/`: billing provider adapter + webhook handling + subscription state. - `internal/analytics/`: backend analytics adapter boundary (console/PostHog/noop). + - `internal/jobs/`: Postgres-backed job queue. + - `internal/email/`: email provider adapter (console/Resend/noop). + - `internal/files/`: file upload adapter (disk / S3 presign). + - `internal/audit/`: audit events recorder and reader. - `internal/telemetry/`: OpenTelemetry init and exporter selection. - `internal/errorreporting/`: backend error reporting adapter (console/Sentry/noop). - `migrations/`: SQL migrations (identity, tenancy, billing tables). @@ -72,4 +77,3 @@ This doc is a “start here” guide for understanding the template quickly. 
- auth and billing endpoints return “not configured” errors - telemetry/analytics/error reporting default to console output or no-ops - support widget is disabled by default - diff --git a/frontend/app/app/dashboard-client.tsx b/frontend/app/app/dashboard-client.tsx index 10abb26..7323dc4 100644 --- a/frontend/app/app/dashboard-client.tsx +++ b/frontend/app/app/dashboard-client.tsx @@ -2,7 +2,16 @@ import { UserButton, useAuth } from "@clerk/nextjs"; import { useEffect, useMemo, useState } from "react"; -import { createBillingPortalSession, fetchViewer, type ViewerResponse } from "@/lib/api"; +import { + completeFileUpload, + createBillingPortalSession, + createFileUploadURL, + fetchAuditEvents, + fetchViewer, + getFileDownloadURL, + type AuditEventRecord, + type ViewerResponse +} from "@/lib/api"; import { createAnalyticsClient } from "@/lib/integrations/analytics"; import { Button } from "@/components/ui/button"; import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; @@ -14,6 +23,10 @@ export function DashboardClient() { const [viewer, setViewer] = useState(null); const [state, setState] = useState("idle"); const [portalLoading, setPortalLoading] = useState(false); + const [auditEvents, setAuditEvents] = useState([]); + const [uploadFile, setUploadFile] = useState(null); + const [uploading, setUploading] = useState(false); + const [lastUploadedFileId, setLastUploadedFileId] = useState(null); const analytics = useMemo( () => createAnalyticsClient((process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? 
"console") as "console" | "posthog" | "none"), [] @@ -55,6 +68,27 @@ export function DashboardClient() { }; }, [getToken, hasClerk, isLoaded, orgId, userId]); + useEffect(() => { + let cancelled = false; + + async function loadAudit() { + if (!hasClerk) return; + if (!isLoaded || !userId) return; + const token = await getToken(); + if (!token) return; + + const data = await fetchAuditEvents(token, orgId); + if (!cancelled) { + setAuditEvents(data?.events ?? []); + } + } + + void loadAudit(); + return () => { + cancelled = true; + }; + }, [getToken, hasClerk, isLoaded, orgId, userId]); + const openBillingPortal = async () => { if (!hasClerk) { return; @@ -73,6 +107,67 @@ export function DashboardClient() { } }; + const startUpload = async () => { + if (!hasClerk || !uploadFile) return; + setUploading(true); + setLastUploadedFileId(null); + + const token = await getToken(); + if (!token) { + setUploading(false); + return; + } + + const created = await createFileUploadURL({ + token, + organizationId: orgId, + filename: uploadFile.name, + contentType: uploadFile.type || "application/octet-stream" + }); + if (!created) { + setUploading(false); + return; + } + + let ok = false; + if (created.uploadType === "direct") { + const form = new FormData(); + form.append("file", uploadFile, uploadFile.name); + const response = await fetch(created.url, { + method: created.method, + headers: { + Authorization: `Bearer ${token}`, + ...(orgId ? 
{ "X-Organization-ID": orgId } : {}) + }, + body: form + }); + ok = response.ok; + } else { + const response = await fetch(created.url, { + method: created.method, + headers: created.headers, + body: uploadFile + }); + ok = response.ok; + if (ok) { + ok = await completeFileUpload({ + token, + organizationId: orgId, + fileId: created.fileId, + sizeBytes: uploadFile.size + }); + } + } + + setUploading(false); + if (ok) { + setLastUploadedFileId(created.fileId); + analytics.track("file_uploaded", { fileId: created.fileId }); + const updated = await fetchAuditEvents(token, orgId); + setAuditEvents(updated?.events ?? []); + } + }; + return (
@@ -127,6 +222,81 @@ export function DashboardClient() {
+ + + + Files + + +

Tenant-scoped uploads with local disk storage or presigned S3/R2 URLs.

+ { + setUploadFile(e.target.files?.[0] ?? null); + }} + /> +
+ + {lastUploadedFileId && ( + + )} +
+
+
+ + + + Audit events + + + {auditEvents.length === 0 ? ( +

No recent events.

+ ) : ( +
    + {auditEvents.slice(0, 10).map((evt) => ( +
  • + {evt.action}{" "} + ({new Date(evt.createdAt).toLocaleString()}) +
  • + ))} +
+ )} +
+
); } diff --git a/frontend/lib/api.ts b/frontend/lib/api.ts index 3fe401a..cf4fd77 100644 --- a/frontend/lib/api.ts +++ b/frontend/lib/api.ts @@ -21,6 +21,32 @@ export type ViewerResponse = { }; }; +export type AuditEventRecord = { + id: string; + organizationId: string; + userId: string; + action: string; + data: Record; + createdAt: string; +}; + +export type AuditEventsResponse = { + events: AuditEventRecord[]; +}; + +export type FileUploadURLResponse = { + fileId: string; + method: string; + url: string; + headers: Record; + uploadType: "direct" | "presigned"; +}; + +export type FileDownloadURLResponse = { + url: string; + downloadType: "direct" | "presigned"; +}; + export async function fetchMeta(): Promise { try { const response = await fetch(`${API_BASE_URL}/api/v1/meta`, { @@ -54,6 +80,92 @@ export async function fetchViewer(token: string, organizationId?: string | null) } } +export async function fetchAuditEvents(token: string, organizationId?: string | null): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/audit/events`, { + method: "GET", + headers: buildAuthHeaders(token, organizationId) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as AuditEventsResponse; + } catch { + return null; + } +} + +export async function createFileUploadURL(params: { + token: string; + organizationId?: string | null; + filename: string; + contentType: string; +}): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/files/upload-url`, { + method: "POST", + headers: { + ...buildAuthHeaders(params.token, params.organizationId), + "Content-Type": "application/json" + }, + body: JSON.stringify({ filename: params.filename, contentType: params.contentType }) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as FileUploadURLResponse; + } catch { + return null; + } +} + +export async function completeFileUpload(params: { + token: string; + organizationId?: string | 
null; + fileId: string; + sizeBytes?: number; +}): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/files/${params.fileId}/complete`, { + method: "POST", + headers: { + ...buildAuthHeaders(params.token, params.organizationId), + "Content-Type": "application/json" + }, + body: JSON.stringify({ sizeBytes: params.sizeBytes ?? 0 }) + }); + + return response.ok; + } catch { + return false; + } +} + +export async function getFileDownloadURL(params: { + token: string; + organizationId?: string | null; + fileId: string; +}): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/files/${params.fileId}/download-url`, { + method: "GET", + headers: buildAuthHeaders(params.token, params.organizationId) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as FileDownloadURLResponse; + } catch { + return null; + } +} + export async function createCheckoutSession(params: { token: string; planCode: string; diff --git a/render.yaml b/render.yaml index f2f97b1..01a6af6 100644 --- a/render.yaml +++ b/render.yaml @@ -22,6 +22,22 @@ services: property: connectionString - key: REDIS_URL sync: false + - key: FILE_STORAGE_PROVIDER + value: s3 + - key: FILE_STORAGE_DISK_PATH + value: ./.data/uploads + - key: S3_BUCKET + sync: false + - key: S3_REGION + value: auto + - key: S3_ENDPOINT + sync: false + - key: S3_ACCESS_KEY_ID + sync: false + - key: S3_SECRET_ACCESS_KEY + sync: false + - key: S3_FORCE_PATH_STYLE + value: "true" - key: CLERK_SECRET_KEY sync: false - key: CLERK_API_URL @@ -36,6 +52,72 @@ services: sync: false - key: STRIPE_PRICE_TEAM_MONTHLY sync: false + - key: ANALYTICS_PROVIDER + value: posthog + - key: POSTHOG_PROJECT_KEY + sync: false + - key: POSTHOG_HOST + value: https://app.posthog.com + - key: ERROR_REPORTING_PROVIDER + value: sentry + - key: SENTRY_DSN + sync: false + - key: SENTRY_ENVIRONMENT + value: production + - key: OTEL_TRACES_EXPORTER + value: otlp + - key: OTEL_SERVICE_NAME + value: 
saas-core-template-backend + - key: OTEL_EXPORTER_OTLP_ENDPOINT + sync: false + - key: OTEL_EXPORTER_OTLP_HEADERS + sync: false + + - type: worker + name: saas-core-template-worker + runtime: go + rootDir: backend + plan: free + autoDeploy: true + buildCommand: go build -o bin/worker ./cmd/worker + startCommand: ./bin/worker + envVars: + - key: APP_ENV + value: production + - key: APP_VERSION + value: render + - key: DATABASE_URL + fromDatabase: + name: saas-core-template-postgres + property: connectionString + - key: REDIS_URL + sync: false + - key: JOBS_ENABLED + value: "true" + - key: JOBS_WORKER_ID + value: render + - key: JOBS_POLL_INTERVAL + value: 1s + - key: EMAIL_PROVIDER + value: resend + - key: EMAIL_FROM + sync: false + - key: RESEND_API_KEY + sync: false + - key: ERROR_REPORTING_PROVIDER + value: sentry + - key: SENTRY_DSN + sync: false + - key: SENTRY_ENVIRONMENT + value: production + - key: OTEL_TRACES_EXPORTER + value: otlp + - key: OTEL_SERVICE_NAME + value: saas-core-template-worker + - key: OTEL_EXPORTER_OTLP_ENDPOINT + sync: false + - key: OTEL_EXPORTER_OTLP_HEADERS + sync: false databases: - name: saas-core-template-postgres From c9dc6575b74237835ecf5928744ca66f261787c6 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 23:20:43 +0800 Subject: [PATCH 07/23] feat(tenancy): enforce single-member personal workspaces --- README.md | 1 + backend/internal/auth/auth.go | 6 +- .../0003_personal_workspaces.down.sql | 14 ++++ .../0003_personal_workspaces.up.sql | 71 +++++++++++++++++++ docs/architecture/multi-tenant-model.md | 9 +++ docs/operations/production-setup-checklist.md | 1 + docs/overview.md | 2 +- 7 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 backend/migrations/0003_personal_workspaces.down.sql create mode 100644 backend/migrations/0003_personal_workspaces.up.sql diff --git a/README.md b/README.md index b479f99..aa91e58 100644 --- a/README.md +++ 
b/README.md @@ -92,6 +92,7 @@ Initial migration files: - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` +- `backend/migrations/0003_personal_workspaces.up.sql` ## Local development Run infra first: diff --git a/backend/internal/auth/auth.go b/backend/internal/auth/auth.go index 1bc156d..2d667c8 100644 --- a/backend/internal/auth/auth.go +++ b/backend/internal/auth/auth.go @@ -250,10 +250,10 @@ func (s *Service) ensureDefaultOrganizationForUser(ctx context.Context, user Use slug := fmt.Sprintf("workspace-%s", shortKey(user.ID)) var orgID string if err := tx.QueryRow(ctx, ` - INSERT INTO organizations (name, slug) - VALUES ($1, $2) + INSERT INTO organizations (name, slug, kind, personal_owner_user_id) + VALUES ($1, $2, 'personal', $3::uuid) RETURNING id::text - `, name, slug).Scan(&orgID); err != nil { + `, name, slug, user.ID).Scan(&orgID); err != nil { return fmt.Errorf("create default organization: %w", err) } diff --git a/backend/migrations/0003_personal_workspaces.down.sql b/backend/migrations/0003_personal_workspaces.down.sql new file mode 100644 index 0000000..34dad61 --- /dev/null +++ b/backend/migrations/0003_personal_workspaces.down.sql @@ -0,0 +1,14 @@ +DROP TRIGGER IF EXISTS trg_enforce_personal_org_membership ON organization_members; +DROP FUNCTION IF EXISTS enforce_personal_org_membership; + +DROP TRIGGER IF EXISTS trg_enforce_personal_org_owner ON organizations; +DROP FUNCTION IF EXISTS enforce_personal_org_owner; + +DROP INDEX IF EXISTS idx_organizations_personal_owner; + +ALTER TABLE organizations DROP CONSTRAINT IF EXISTS organizations_kind_check; + +ALTER TABLE organizations + DROP COLUMN IF EXISTS personal_owner_user_id, + DROP COLUMN IF EXISTS kind; + diff --git a/backend/migrations/0003_personal_workspaces.up.sql b/backend/migrations/0003_personal_workspaces.up.sql new file mode 100644 index 0000000..3eccb2f --- /dev/null +++ b/backend/migrations/0003_personal_workspaces.up.sql @@ 
-0,0 +1,71 @@ +-- Personal workspaces are implemented as organizations with kind='personal'. +-- They are enforced to be single-member and owned by a specific user. + +ALTER TABLE organizations + ADD COLUMN IF NOT EXISTS kind TEXT NOT NULL DEFAULT 'team', + ADD COLUMN IF NOT EXISTS personal_owner_user_id UUID REFERENCES users(id) ON DELETE SET NULL; + +ALTER TABLE organizations + ADD CONSTRAINT organizations_kind_check CHECK (kind IN ('personal', 'team')); + +CREATE INDEX IF NOT EXISTS idx_organizations_personal_owner ON organizations(personal_owner_user_id); + +-- Enforce: personal org must have an owner. +CREATE OR REPLACE FUNCTION enforce_personal_org_owner() +RETURNS trigger AS $$ +BEGIN + IF NEW.kind = 'personal' AND NEW.personal_owner_user_id IS NULL THEN + RAISE EXCEPTION 'personal organization requires personal_owner_user_id'; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS trg_enforce_personal_org_owner ON organizations; +CREATE TRIGGER trg_enforce_personal_org_owner +BEFORE INSERT OR UPDATE ON organizations +FOR EACH ROW +EXECUTE FUNCTION enforce_personal_org_owner(); + +-- Enforce: personal org is single-member and membership must be the owner. 
+CREATE OR REPLACE FUNCTION enforce_personal_org_membership() +RETURNS trigger AS $$ +DECLARE + org_kind TEXT; + owner_id UUID; + other_member_count INT; +BEGIN + SELECT kind, personal_owner_user_id INTO org_kind, owner_id + FROM organizations + WHERE id = NEW.organization_id; + + IF org_kind = 'personal' THEN + IF owner_id IS NULL THEN + RAISE EXCEPTION 'personal organization missing owner'; + END IF; + + IF NEW.user_id <> owner_id THEN + RAISE EXCEPTION 'personal organization membership must be the owner'; + END IF; + + SELECT COUNT(*) INTO other_member_count + FROM organization_members + WHERE organization_id = NEW.organization_id + AND user_id <> owner_id + AND (TG_OP = 'INSERT' OR id <> NEW.id); + + IF other_member_count > 0 THEN + RAISE EXCEPTION 'personal organization must be single-member'; + END IF; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS trg_enforce_personal_org_membership ON organization_members; +CREATE TRIGGER trg_enforce_personal_org_membership +BEFORE INSERT OR UPDATE ON organization_members +FOR EACH ROW +EXECUTE FUNCTION enforce_personal_org_membership(); + diff --git a/docs/architecture/multi-tenant-model.md b/docs/architecture/multi-tenant-model.md index bd9b37a..2f16e6b 100644 --- a/docs/architecture/multi-tenant-model.md +++ b/docs/architecture/multi-tenant-model.md @@ -8,6 +8,15 @@ This template uses an organization/workspace model as the default multi-tenant s - Product data belongs to an organization unless it is explicitly global platform metadata. - Effective authorization is determined by membership and role within the active organization. +### Personal workspace (Option A) + +Every new user gets a **personal workspace** implemented as a normal `organizations` row with: + +- `kind = 'personal'` +- `personal_owner_user_id = ` + +This personal workspace is enforced to be **single-member** (owner only). Users can still create and join other (team) organizations. 
+ ## Recommended tables - `organizations` diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md index 28d1f54..771a3e5 100644 --- a/docs/operations/production-setup-checklist.md +++ b/docs/operations/production-setup-checklist.md @@ -89,6 +89,7 @@ Apply migrations in order against Render Postgres before using auth/billing/file - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` +- `backend/migrations/0003_personal_workspaces.up.sql` ## 2) Deploy frontend (Vercel) diff --git a/docs/overview.md b/docs/overview.md index 3ce5660..2339960 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -58,7 +58,7 @@ This doc is a “start here” guide for understanding the template quickly. - Backend verifies token with Clerk, then ensures: - internal `users` row exists - identity mapping exists in `auth_identities` - - user has at least one organization membership + - user has a personal workspace (single-member organization) and at least one membership ### Tenancy / org context From 76a683e152697594d15fd07c505dbb9effa846f0 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 23:27:23 +0800 Subject: [PATCH 08/23] db: enforce team org owner --- README.md | 1 + .../0004_team_owner_enforcement.down.sql | 6 ++ .../0004_team_owner_enforcement.up.sql | 57 +++++++++++++++++++ docs/architecture/multi-tenant-model.md | 7 +++ docs/operations/production-setup-checklist.md | 1 + 5 files changed, 72 insertions(+) create mode 100644 backend/migrations/0004_team_owner_enforcement.down.sql create mode 100644 backend/migrations/0004_team_owner_enforcement.up.sql diff --git a/README.md b/README.md index aa91e58..639123a 100644 --- a/README.md +++ b/README.md @@ -93,6 +93,7 @@ Initial migration files: - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` - 
`backend/migrations/0003_personal_workspaces.up.sql` +- `backend/migrations/0004_team_owner_enforcement.up.sql` ## Local development Run infra first: diff --git a/backend/migrations/0004_team_owner_enforcement.down.sql b/backend/migrations/0004_team_owner_enforcement.down.sql new file mode 100644 index 0000000..8c7b25f --- /dev/null +++ b/backend/migrations/0004_team_owner_enforcement.down.sql @@ -0,0 +1,6 @@ +DROP TRIGGER IF EXISTS trg_enforce_team_has_owner ON organization_members; +DROP FUNCTION IF EXISTS enforce_team_has_owner; + +ALTER TABLE organization_members + DROP CONSTRAINT IF EXISTS organization_members_role_check; + diff --git a/backend/migrations/0004_team_owner_enforcement.up.sql b/backend/migrations/0004_team_owner_enforcement.up.sql new file mode 100644 index 0000000..3a8deb8 --- /dev/null +++ b/backend/migrations/0004_team_owner_enforcement.up.sql @@ -0,0 +1,57 @@ +-- Enforce role values and ensure team orgs always retain at least one owner. + +ALTER TABLE organization_members + ADD CONSTRAINT organization_members_role_check CHECK (role IN ('owner', 'admin', 'member')); + +CREATE OR REPLACE FUNCTION enforce_team_has_owner() +RETURNS trigger AS $$ +DECLARE + org_kind TEXT; + owner_count INT; + target_org_id UUID; +BEGIN + target_org_id := COALESCE(OLD.organization_id, NEW.organization_id); + + SELECT kind INTO org_kind + FROM organizations + WHERE id = target_org_id; + + IF org_kind = 'team' THEN + -- If the operation would remove or demote an owner, ensure at least one owner remains. 
+ IF TG_OP = 'DELETE' THEN + IF OLD.role = 'owner' THEN + SELECT COUNT(*) INTO owner_count + FROM organization_members + WHERE organization_id = OLD.organization_id + AND role = 'owner' + AND id <> OLD.id; + + IF owner_count = 0 THEN + RAISE EXCEPTION 'team organization must have at least one owner'; + END IF; + END IF; + ELSIF TG_OP = 'UPDATE' THEN + IF OLD.role = 'owner' AND NEW.role <> 'owner' THEN + SELECT COUNT(*) INTO owner_count + FROM organization_members + WHERE organization_id = NEW.organization_id + AND role = 'owner' + AND id <> NEW.id; + + IF owner_count = 0 THEN + RAISE EXCEPTION 'team organization must have at least one owner'; + END IF; + END IF; + END IF; + END IF; + + RETURN COALESCE(NEW, OLD); +END; +$$ LANGUAGE plpgsql; + +DROP TRIGGER IF EXISTS trg_enforce_team_has_owner ON organization_members; +CREATE TRIGGER trg_enforce_team_has_owner +BEFORE UPDATE OR DELETE ON organization_members +FOR EACH ROW +EXECUTE FUNCTION enforce_team_has_owner(); + diff --git a/docs/architecture/multi-tenant-model.md b/docs/architecture/multi-tenant-model.md index 2f16e6b..6f7ca95 100644 --- a/docs/architecture/multi-tenant-model.md +++ b/docs/architecture/multi-tenant-model.md @@ -17,6 +17,13 @@ Every new user gets a **personal workspace** implemented as a normal `organizati This personal workspace is enforced to be **single-member** (owner only). Users can still create and join other (team) organizations. +### Team organizations + +Team organizations (`kind = 'team'`) support multiple members and roles. + +- The `organization_members.role` column is constrained to: `owner`, `admin`, `member`. +- Team organizations are enforced to always have **at least one** `owner` (the last owner cannot be removed or demoted). 
+ ## Recommended tables - `organizations` diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md index 771a3e5..0f1444a 100644 --- a/docs/operations/production-setup-checklist.md +++ b/docs/operations/production-setup-checklist.md @@ -90,6 +90,7 @@ Apply migrations in order against Render Postgres before using auth/billing/file - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` - `backend/migrations/0003_personal_workspaces.up.sql` +- `backend/migrations/0004_team_owner_enforcement.up.sql` ## 2) Deploy frontend (Vercel) From 3ab3625f6fac5b7e4a2033a019724e9e9bc798e1 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Sun, 22 Feb 2026 23:45:07 +0800 Subject: [PATCH 09/23] api: add org RBAC roles --- backend/internal/api/rbac.go | 58 +++++++++++++++++++++++++ backend/internal/api/rbac_test.go | 41 +++++++++++++++++ backend/internal/api/router.go | 6 +-- docs/architecture/multi-tenant-model.md | 17 +++++++- 4 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 backend/internal/api/rbac.go create mode 100644 backend/internal/api/rbac_test.go diff --git a/backend/internal/api/rbac.go b/backend/internal/api/rbac.go new file mode 100644 index 0000000..82b403a --- /dev/null +++ b/backend/internal/api/rbac.go @@ -0,0 +1,58 @@ +package api + +import ( + "net/http" + "strings" +) + +type orgRole string + +const ( + orgRoleMember orgRole = "member" + orgRoleAdmin orgRole = "admin" + orgRoleOwner orgRole = "owner" +) + +func (r orgRole) rank() (int, bool) { + switch strings.ToLower(strings.TrimSpace(string(r))) { + case string(orgRoleMember): + return 1, true + case string(orgRoleAdmin): + return 2, true + case string(orgRoleOwner): + return 3, true + default: + return 0, false + } +} + +func orgRoleAllows(actual string, required orgRole) bool { + actualRank, ok := orgRole(actual).rank() + if !ok { + 
return false + } + + requiredRank, ok := required.rank() + if !ok { + return false + } + + return actualRank >= requiredRank +} + +func (s *Server) requireOrgRole(required orgRole, next http.HandlerFunc) http.HandlerFunc { + return s.requireOrg(func(w http.ResponseWriter, r *http.Request) { + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + if !orgRoleAllows(org.Role, required) { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "insufficient_role"}) + return + } + + next.ServeHTTP(w, r) + }) +} diff --git a/backend/internal/api/rbac_test.go b/backend/internal/api/rbac_test.go new file mode 100644 index 0000000..3ba1e31 --- /dev/null +++ b/backend/internal/api/rbac_test.go @@ -0,0 +1,41 @@ +package api + +import "testing" + +func TestOrgRoleAllows(t *testing.T) { + t.Run("rejects unknown actual role", func(t *testing.T) { + if orgRoleAllows("superadmin", orgRoleMember) { + t.Fatalf("expected false") + } + }) + + t.Run("rejects unknown required role", func(t *testing.T) { + if orgRoleAllows("owner", orgRole("weird")) { + t.Fatalf("expected false") + } + }) + + t.Run("member meets member", func(t *testing.T) { + if !orgRoleAllows("member", orgRoleMember) { + t.Fatalf("expected true") + } + }) + + t.Run("member does not meet admin", func(t *testing.T) { + if orgRoleAllows("member", orgRoleAdmin) { + t.Fatalf("expected false") + } + }) + + t.Run("admin meets member", func(t *testing.T) { + if !orgRoleAllows("admin", orgRoleMember) { + t.Fatalf("expected true") + } + }) + + t.Run("owner meets admin", func(t *testing.T) { + if !orgRoleAllows("owner", orgRoleAdmin) { + t.Fatalf("expected true") + } + }) +} diff --git a/backend/internal/api/router.go b/backend/internal/api/router.go index c9ac21b..30c2fcf 100644 --- a/backend/internal/api/router.go +++ b/backend/internal/api/router.go @@ -118,10 +118,10 @@ func (s *Server) Handler() 
http.Handler { mux.HandleFunc("GET /readyz", s.readyz) mux.HandleFunc("GET /api/v1/meta", s.meta) mux.HandleFunc("GET /api/v1/auth/me", s.requireAuth(s.authMe)) - mux.HandleFunc("POST /api/v1/billing/checkout-session", s.requireOrg(s.billingCheckoutSession)) - mux.HandleFunc("POST /api/v1/billing/portal-session", s.requireOrg(s.billingPortalSession)) + mux.HandleFunc("POST /api/v1/billing/checkout-session", s.requireOrgRole(orgRoleAdmin, s.billingCheckoutSession)) + mux.HandleFunc("POST /api/v1/billing/portal-session", s.requireOrgRole(orgRoleAdmin, s.billingPortalSession)) mux.HandleFunc("POST /api/v1/billing/webhook", s.billingWebhook) - mux.HandleFunc("GET /api/v1/audit/events", s.requireOrg(s.auditEvents)) + mux.HandleFunc("GET /api/v1/audit/events", s.requireOrgRole(orgRoleAdmin, s.auditEvents)) mux.HandleFunc("POST /api/v1/files/upload-url", s.requireOrg(s.filesUploadURL)) mux.HandleFunc("POST /api/v1/files/{id}/upload", s.requireOrg(s.filesDirectUpload)) mux.HandleFunc("POST /api/v1/files/{id}/complete", s.requireOrg(s.filesComplete)) diff --git a/docs/architecture/multi-tenant-model.md b/docs/architecture/multi-tenant-model.md index 6f7ca95..cf78386 100644 --- a/docs/architecture/multi-tenant-model.md +++ b/docs/architecture/multi-tenant-model.md @@ -44,9 +44,24 @@ Every tenant-scoped business table must include `organization_id`. - Resolve active organization context on each request. - Verify membership before all tenant-scoped reads and writes. -- Apply role checks for sensitive actions (billing changes, member management, settings). +- Apply role checks for sensitive actions (billing changes, member management, audit access, settings). - Deny by default when organization context is missing or invalid. +### RBAC roles + +Membership includes a `role`: + +- `owner`: full control of an organization. +- `admin`: manage billing/settings and operational data. +- `member`: default role for day-to-day usage. + +Role hierarchy: `owner` > `admin` > `member`. 
+ +Current API enforcement: + +- Billing endpoints require `admin` or higher: `POST /api/v1/billing/checkout-session`, `POST /api/v1/billing/portal-session`. +- Audit events require `admin` or higher: `GET /api/v1/audit/events`. + ## API scoping conventions - Never accept raw `organization_id` from clients without server-side membership validation. From 47b6a8f912bc55c0da06f6346e38486926e6e730 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:16:11 +0800 Subject: [PATCH 10/23] orgs: add team org management --- README.md | 1 + backend/cmd/api/main.go | 3 + backend/internal/api/orgs_handlers.go | 270 ++++++++++ backend/internal/api/router.go | 17 + backend/internal/auth/auth.go | 5 +- backend/internal/orgs/orgs.go | 504 ++++++++++++++++++ backend/migrations/0005_org_invites.down.sql | 8 + backend/migrations/0005_org_invites.up.sql | 24 + docs/README.md | 2 + docs/architecture/multi-tenant-model.md | 1 + docs/operations/organization-management.md | 36 ++ docs/operations/production-setup-checklist.md | 1 + frontend/app/app/dashboard-client.tsx | 295 +++++++++- frontend/app/app/invite/invite-client.tsx | 83 +++ frontend/app/app/invite/page.tsx | 6 + frontend/app/integrations-provider.tsx | 12 +- frontend/app/pricing/pricing-client.tsx | 10 +- frontend/lib/api.ts | 128 +++++ 18 files changed, 1384 insertions(+), 22 deletions(-) create mode 100644 backend/internal/api/orgs_handlers.go create mode 100644 backend/internal/orgs/orgs.go create mode 100644 backend/migrations/0005_org_invites.down.sql create mode 100644 backend/migrations/0005_org_invites.up.sql create mode 100644 docs/operations/organization-management.md create mode 100644 frontend/app/app/invite/invite-client.tsx create mode 100644 frontend/app/app/invite/page.tsx diff --git a/README.md b/README.md index 639123a..70b8730 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,7 @@ Initial migration files: - 
`backend/migrations/0002_jobs_audit_files.up.sql` - `backend/migrations/0003_personal_workspaces.up.sql` - `backend/migrations/0004_team_owner_enforcement.up.sql` +- `backend/migrations/0005_org_invites.up.sql` ## Local development Run infra first: diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index f85e9e0..45bca3b 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -23,6 +23,7 @@ import ( "saas-core-template/backend/internal/errorreporting" "saas-core-template/backend/internal/files" "saas-core-template/backend/internal/jobs" + "saas-core-template/backend/internal/orgs" "saas-core-template/backend/internal/telemetry" ) @@ -101,6 +102,7 @@ func main() { auditRecorder := audit.NewDBRecorder(pool) jobStore := jobs.NewStore(pool) + orgService := orgs.NewService(pool, orgs.WithJobs(jobStore), orgs.WithAudit(auditRecorder)) var s3Provider *files.S3Provider if cfg.FileStorageProvider == "s3" { @@ -161,6 +163,7 @@ func main() { api.WithAnalytics(analyticsClient), api.WithAudit(auditRecorder), api.WithFiles(filesService), + api.WithOrgs(orgService), ) baseHandler := apiServer.Handler() diff --git a/backend/internal/api/orgs_handlers.go b/backend/internal/api/orgs_handlers.go new file mode 100644 index 0000000..cfc1f4b --- /dev/null +++ b/backend/internal/api/orgs_handlers.go @@ -0,0 +1,270 @@ +package api + +import ( + "encoding/json" + "errors" + "net/http" + "strings" + "time" + + "saas-core-template/backend/internal/analytics" + "saas-core-template/backend/internal/orgs" +) + +func (s *Server) orgsList(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + if user.ID == "" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "user_not_found"}) + return + } + + items, err := s.orgs.ListForUser(r.Context(), user.ID) + if err != nil { + writeJSON(w, 
http.StatusInternalServerError, map[string]string{"error": "failed_to_list_orgs"}) + return + } + + writeJSON(w, http.StatusOK, map[string]any{"organizations": items}) +} + +func (s *Server) orgsCreate(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + if user.ID == "" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "user_not_found"}) + return + } + + var req struct { + Name string `json:"name"` + Slug string `json:"slug"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid_request_body"}) + return + } + + created, err := s.orgs.CreateTeamOrganization(r.Context(), user.ID, orgs.CreateOrgInput{ + Name: req.Name, + Slug: req.Slug, + }) + if err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_create_org"}) + return + } + + s.analytics.Track(r.Context(), analytics.Event{ + Name: "organization_created", + DistinctID: user.ID, + Properties: map[string]any{"organization_id": created.ID, "kind": "team"}, + }) + + writeJSON(w, http.StatusOK, map[string]any{"organization": created}) +} + +func (s *Server) orgMembersList(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + members, err := s.orgs.ListMembers(r.Context(), org.ID) + if err != nil { + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed_to_list_members"}) + return + } + + writeJSON(w, http.StatusOK, map[string]any{"members": members}) +} + +func (s *Server) orgInvitesCreate(w 
http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + org := authOrgFromContext(r.Context()) + if user.ID == "" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "user_not_found"}) + return + } + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + var req struct { + Email string `json:"email"` + Role string `json:"role"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid_request_body"}) + return + } + + invite, err := s.orgs.CreateInvite(r.Context(), orgs.CreateInviteInput{ + OrganizationID: org.ID, + InvitedByUserID: user.ID, + Email: req.Email, + Role: req.Role, + }) + if err != nil { + if errors.Is(err, orgs.ErrInviteAlreadyExists) { + writeJSON(w, http.StatusConflict, map[string]string{"error": "invite_already_exists"}) + return + } + if errors.Is(err, orgs.ErrInvalidOrganization) { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invites_not_allowed_for_personal_workspace"}) + return + } + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_create_invite"}) + return + } + + acceptURL := strings.TrimRight(s.appBaseURL, "/") + "/app/invite?token=" + invite.Token + _ = s.orgs.EnqueueInviteEmail(r.Context(), invite, acceptURL) + writeJSON(w, http.StatusOK, map[string]any{ + "invite": invite, + "acceptUrl": acceptURL, + }) +} + +func (s *Server) orgInvitesAccept(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + user := authUserFromContext(r.Context()) + if user.ID == "" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "user_not_found"}) + 
return + } + + var req struct { + Token string `json:"token"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid_request_body"}) + return + } + + org, err := s.orgs.AcceptInvite(r.Context(), orgs.AcceptInviteInput{ + Token: strings.TrimSpace(req.Token), + UserID: user.ID, + Email: user.PrimaryEmail, + }) + if err != nil { + switch { + case errors.Is(err, orgs.ErrInviteAlreadyUsed): + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invite_already_used"}) + case errors.Is(err, orgs.ErrInviteEmailMismatch): + writeJSON(w, http.StatusForbidden, map[string]string{"error": "invite_email_mismatch"}) + default: + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_accept_invite"}) + } + return + } + + s.analytics.Track(r.Context(), analytics.Event{ + Name: "organization_invite_accepted", + DistinctID: user.ID, + Properties: map[string]any{"organization_id": org.ID}, + }) + + writeJSON(w, http.StatusOK, map[string]any{ + "organization": org, + "acceptedAt": time.Now().UTC().Format(time.RFC3339), + }) +} + +func (s *Server) orgMembersUpdateRole(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + targetUserID := strings.TrimSpace(r.PathValue("userId")) + if targetUserID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_user_id"}) + return + } + + var req struct { + Role string `json:"role"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid_request_body"}) + return + } + + if err := s.orgs.UpdateMemberRole(r.Context(), 
orgs.UpdateMemberRoleInput{ + OrganizationID: org.ID, + UserID: targetUserID, + Role: req.Role, + }); err != nil { + if errors.Is(err, orgs.ErrInvalidOrganization) { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "role_changes_not_allowed_for_personal_workspace"}) + return + } + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_update_role"}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"status": "updated"}) +} + +func (s *Server) orgMembersRemove(w http.ResponseWriter, r *http.Request) { + if s.orgs == nil { + writeJSON(w, http.StatusServiceUnavailable, map[string]string{"error": "orgs_not_configured"}) + return + } + + org := authOrgFromContext(r.Context()) + if org.ID == "" { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "organization_required"}) + return + } + + targetUserID := strings.TrimSpace(r.PathValue("userId")) + if targetUserID == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "missing_user_id"}) + return + } + + if err := s.orgs.RemoveMember(r.Context(), orgs.RemoveMemberInput{ + OrganizationID: org.ID, + UserID: targetUserID, + }); err != nil { + if errors.Is(err, orgs.ErrInvalidOrganization) { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "member_changes_not_allowed_for_personal_workspace"}) + return + } + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "failed_to_remove_member"}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"status": "removed"}) +} diff --git a/backend/internal/api/router.go b/backend/internal/api/router.go index 30c2fcf..8e244fc 100644 --- a/backend/internal/api/router.go +++ b/backend/internal/api/router.go @@ -16,6 +16,7 @@ import ( "saas-core-template/backend/internal/auth" "saas-core-template/backend/internal/billing" "saas-core-template/backend/internal/files" + "saas-core-template/backend/internal/orgs" ) type Server struct { @@ -30,6 +31,7 @@ type Server struct { analytics 
analytics.Client audit audit.Recorder files *files.Service + orgs *orgs.Service } type serverOptions struct { @@ -39,6 +41,7 @@ type serverOptions struct { analytics analytics.Client audit audit.Recorder files *files.Service + orgs *orgs.Service } func NewServer(appName string, env string, version string, db *pgxpool.Pool, redisClient *redis.Client, opts ...func(*serverOptions)) *Server { @@ -59,6 +62,7 @@ func NewServer(appName string, env string, version string, db *pgxpool.Pool, red analytics: defaultAnalytics(options.analytics), audit: defaultAudit(options.audit), files: options.files, + orgs: options.orgs, } } @@ -112,12 +116,25 @@ func WithFiles(service *files.Service) func(*serverOptions) { } } +func WithOrgs(service *orgs.Service) func(*serverOptions) { + return func(opts *serverOptions) { + opts.orgs = service + } +} + func (s *Server) Handler() http.Handler { mux := http.NewServeMux() mux.HandleFunc("GET /healthz", s.healthz) mux.HandleFunc("GET /readyz", s.readyz) mux.HandleFunc("GET /api/v1/meta", s.meta) mux.HandleFunc("GET /api/v1/auth/me", s.requireAuth(s.authMe)) + mux.HandleFunc("GET /api/v1/orgs", s.requireAuth(s.orgsList)) + mux.HandleFunc("POST /api/v1/orgs", s.requireAuth(s.orgsCreate)) + mux.HandleFunc("GET /api/v1/org/members", s.requireOrgRole(orgRoleAdmin, s.orgMembersList)) + mux.HandleFunc("POST /api/v1/org/invites", s.requireOrgRole(orgRoleAdmin, s.orgInvitesCreate)) + mux.HandleFunc("POST /api/v1/org/invites/accept", s.requireAuth(s.orgInvitesAccept)) + mux.HandleFunc("PATCH /api/v1/org/members/{userId}", s.requireOrgRole(orgRoleOwner, s.orgMembersUpdateRole)) + mux.HandleFunc("DELETE /api/v1/org/members/{userId}", s.requireOrgRole(orgRoleOwner, s.orgMembersRemove)) mux.HandleFunc("POST /api/v1/billing/checkout-session", s.requireOrgRole(orgRoleAdmin, s.billingCheckoutSession)) mux.HandleFunc("POST /api/v1/billing/portal-session", s.requireOrgRole(orgRoleAdmin, s.billingPortalSession)) mux.HandleFunc("POST /api/v1/billing/webhook", 
s.billingWebhook) diff --git a/backend/internal/auth/auth.go b/backend/internal/auth/auth.go index 2d667c8..a5f7cf6 100644 --- a/backend/internal/auth/auth.go +++ b/backend/internal/auth/auth.go @@ -46,6 +46,7 @@ type Organization struct { ID string `json:"id"` Name string `json:"name"` Slug string `json:"slug"` + Kind string `json:"kind"` Role string `json:"role"` } @@ -196,7 +197,7 @@ func (s *Service) ensureUserIdentity(ctx context.Context, principal VerifiedPrin func (s *Service) ResolveOrganization(ctx context.Context, userID string, requestedOrgID string) (Organization, error) { query := ` - SELECT o.id::text, o.name, o.slug, om.role + SELECT o.id::text, o.name, o.slug, o.kind, om.role FROM organizations o INNER JOIN organization_members om ON om.organization_id = o.id WHERE om.user_id = $1 @@ -211,7 +212,7 @@ func (s *Service) ResolveOrganization(ctx context.Context, userID string, reques query += ` ORDER BY om.created_at ASC LIMIT 1` var org Organization - if err := s.db.QueryRow(ctx, query, args...).Scan(&org.ID, &org.Name, &org.Slug, &org.Role); err != nil { + if err := s.db.QueryRow(ctx, query, args...).Scan(&org.ID, &org.Name, &org.Slug, &org.Kind, &org.Role); err != nil { return Organization{}, ErrNoOrganization } diff --git a/backend/internal/orgs/orgs.go b/backend/internal/orgs/orgs.go new file mode 100644 index 0000000..3b41574 --- /dev/null +++ b/backend/internal/orgs/orgs.go @@ -0,0 +1,504 @@ +package orgs + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "saas-core-template/backend/internal/audit" + "saas-core-template/backend/internal/jobs" +) + +var ( + ErrNotFound = errors.New("not found") + ErrInviteAlreadyExists = errors.New("invite already exists") + ErrInviteAlreadyUsed = errors.New("invite already used") + ErrInviteEmailMismatch = errors.New("invite email mismatch") + ErrInvalidOrganization = errors.New("invalid 
organization") +) + +type Service struct { + db *pgxpool.Pool + jobs jobs.Enqueuer + audit audit.Recorder +} + +type Organization struct { + ID string `json:"id"` + Name string `json:"name"` + Slug string `json:"slug"` + Kind string `json:"kind"` + Role string `json:"role"` +} + +type Member struct { + UserID string `json:"userId"` + PrimaryEmail string `json:"primaryEmail"` + Role string `json:"role"` + JoinedAt time.Time `json:"joinedAt"` +} + +type Invite struct { + ID string `json:"id"` + OrganizationID string `json:"organizationId"` + Email string `json:"email"` + Role string `json:"role"` + Token string `json:"token"` + CreatedAt time.Time `json:"createdAt"` + AcceptedAt *time.Time `json:"acceptedAt,omitempty"` +} + +type CreateOrgInput struct { + Name string + Slug string +} + +type CreateInviteInput struct { + OrganizationID string + InvitedByUserID string + Email string + Role string +} + +type AcceptInviteInput struct { + Token string + UserID string + Email string +} + +type UpdateMemberRoleInput struct { + OrganizationID string + UserID string + Role string +} + +type RemoveMemberInput struct { + OrganizationID string + UserID string +} + +func NewService(db *pgxpool.Pool, opts ...func(*Service)) *Service { + s := &Service{db: db, audit: audit.NewNoop()} + for _, opt := range opts { + if opt != nil { + opt(s) + } + } + return s +} + +func WithJobs(enqueuer jobs.Enqueuer) func(*Service) { + return func(s *Service) { + s.jobs = enqueuer + } +} + +func WithAudit(recorder audit.Recorder) func(*Service) { + return func(s *Service) { + if recorder != nil { + s.audit = recorder + } + } +} + +func (s *Service) ListForUser(ctx context.Context, userID string) ([]Organization, error) { + rows, err := s.db.Query(ctx, ` + SELECT o.id::text, o.name, o.slug, o.kind, om.role + FROM organizations o + INNER JOIN organization_members om ON om.organization_id = o.id + WHERE om.user_id = $1 + ORDER BY om.created_at ASC + `, userID) + if err != nil { + return nil, 
fmt.Errorf("list organizations: %w", err) + } + defer rows.Close() + + var out []Organization + for rows.Next() { + var org Organization + if err := rows.Scan(&org.ID, &org.Name, &org.Slug, &org.Kind, &org.Role); err != nil { + return nil, fmt.Errorf("scan organization: %w", err) + } + out = append(out, org) + } + if rows.Err() != nil { + return nil, fmt.Errorf("list organizations rows: %w", rows.Err()) + } + return out, nil +} + +func (s *Service) CreateTeamOrganization(ctx context.Context, creatorUserID string, input CreateOrgInput) (Organization, error) { + name := strings.TrimSpace(input.Name) + if name == "" { + return Organization{}, fmt.Errorf("missing name") + } + + slug := strings.TrimSpace(input.Slug) + if slug == "" { + slug = slugify(name) + } + if slug == "" { + return Organization{}, fmt.Errorf("invalid slug") + } + + tx, err := s.db.Begin(ctx) + if err != nil { + return Organization{}, fmt.Errorf("begin tx: %w", err) + } + defer tx.Rollback(ctx) + + var org Organization + org.Name = name + org.Slug = slug + org.Kind = "team" + org.Role = "owner" + + baseSlug := slug + for i := 0; i < 5; i++ { + err := tx.QueryRow(ctx, ` + INSERT INTO organizations (name, slug, kind) + VALUES ($1, $2, 'team') + RETURNING id::text + `, name, slug).Scan(&org.ID) + if err == nil { + break + } + if isUniqueViolation(err) { + slug = fmt.Sprintf("%s-%s", baseSlug, randomSuffix()) + continue + } + return Organization{}, fmt.Errorf("insert organization: %w", err) + } + if org.ID == "" { + return Organization{}, fmt.Errorf("failed to allocate unique slug") + } + + if _, err := tx.Exec(ctx, ` + INSERT INTO organization_members (organization_id, user_id, role) + VALUES ($1, $2, 'owner') + `, org.ID, creatorUserID); err != nil { + return Organization{}, fmt.Errorf("insert organization member: %w", err) + } + + if err := tx.Commit(ctx); err != nil { + return Organization{}, fmt.Errorf("commit create org: %w", err) + } + + _ = s.audit.Record(ctx, audit.Event{ + OrganizationID: 
org.ID, + UserID: creatorUserID, + Action: "organization_created", + Data: map[string]any{"kind": "team"}, + }) + + return org, nil +} + +func (s *Service) ListMembers(ctx context.Context, organizationID string) ([]Member, error) { + rows, err := s.db.Query(ctx, ` + SELECT u.id::text, COALESCE(u.primary_email, ''), om.role, om.created_at + FROM organization_members om + INNER JOIN users u ON u.id = om.user_id + WHERE om.organization_id = $1 + ORDER BY om.created_at ASC + `, organizationID) + if err != nil { + return nil, fmt.Errorf("list members: %w", err) + } + defer rows.Close() + + var out []Member + for rows.Next() { + var m Member + if err := rows.Scan(&m.UserID, &m.PrimaryEmail, &m.Role, &m.JoinedAt); err != nil { + return nil, fmt.Errorf("scan member: %w", err) + } + out = append(out, m) + } + if rows.Err() != nil { + return nil, fmt.Errorf("list members rows: %w", rows.Err()) + } + return out, nil +} + +func (s *Service) CreateInvite(ctx context.Context, input CreateInviteInput) (Invite, error) { + email := normalizeEmail(input.Email) + if email == "" { + return Invite{}, fmt.Errorf("missing email") + } + + role := strings.ToLower(strings.TrimSpace(input.Role)) + if role == "" { + role = "member" + } + if role != "member" && role != "admin" { + return Invite{}, fmt.Errorf("invalid role") + } + + var orgKind string + if err := s.db.QueryRow(ctx, `SELECT kind FROM organizations WHERE id = $1`, input.OrganizationID).Scan(&orgKind); err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return Invite{}, ErrNotFound + } + return Invite{}, fmt.Errorf("load organization kind: %w", err) + } + if orgKind != "team" { + return Invite{}, ErrInvalidOrganization + } + + token, err := newToken(16) + if err != nil { + return Invite{}, fmt.Errorf("generate token: %w", err) + } + + var invite Invite + err = s.db.QueryRow(ctx, ` + INSERT INTO organization_invites (organization_id, email, role, token, invited_by_user_id) + VALUES ($1, $2, $3, $4, $5) + RETURNING id::text, 
organization_id::text, email, role, token, created_at, accepted_at + `, input.OrganizationID, email, role, token, input.InvitedByUserID).Scan( + &invite.ID, + &invite.OrganizationID, + &invite.Email, + &invite.Role, + &invite.Token, + &invite.CreatedAt, + &invite.AcceptedAt, + ) + if err != nil { + if isUniqueViolation(err) { + return Invite{}, ErrInviteAlreadyExists + } + return Invite{}, fmt.Errorf("insert invite: %w", err) + } + + _ = s.audit.Record(ctx, audit.Event{ + OrganizationID: input.OrganizationID, + UserID: input.InvitedByUserID, + Action: "organization_invite_created", + Data: map[string]any{"email": email, "role": role}, + }) + + return invite, nil +} + +func (s *Service) EnqueueInviteEmail(ctx context.Context, invite Invite, acceptURL string) error { + if s.jobs == nil { + return nil + } + if strings.TrimSpace(invite.Email) == "" || strings.TrimSpace(acceptURL) == "" { + return nil + } + + orgName := "your workspace" + _ = s.db.QueryRow(ctx, `SELECT name FROM organizations WHERE id = $1`, invite.OrganizationID).Scan(&orgName) + + subject := fmt.Sprintf("You're invited to join %s", orgName) + text := fmt.Sprintf("You have been invited to join %s.\n\nAccept: %s\n", orgName, acceptURL) + + _, err := s.jobs.Enqueue(ctx, "send_email", map[string]any{ + "to": invite.Email, + "subject": subject, + "text": text, + }, time.Now().UTC()) + if err != nil { + return fmt.Errorf("enqueue invite email: %w", err) + } + return nil +} + +func (s *Service) AcceptInvite(ctx context.Context, input AcceptInviteInput) (Organization, error) { + token := strings.TrimSpace(input.Token) + if token == "" { + return Organization{}, fmt.Errorf("missing token") + } + + tx, err := s.db.Begin(ctx) + if err != nil { + return Organization{}, fmt.Errorf("begin tx: %w", err) + } + defer tx.Rollback(ctx) + + var inviteID string + var orgID string + var email string + var role string + var acceptedAt *time.Time + if err := tx.QueryRow(ctx, ` + SELECT id::text, organization_id::text, email, 
role, accepted_at + FROM organization_invites + WHERE token = $1 + FOR UPDATE + `, token).Scan(&inviteID, &orgID, &email, &role, &acceptedAt); err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return Organization{}, ErrNotFound + } + return Organization{}, fmt.Errorf("load invite: %w", err) + } + + if acceptedAt != nil { + return Organization{}, ErrInviteAlreadyUsed + } + + if normalizeEmail(input.Email) == "" || normalizeEmail(input.Email) != normalizeEmail(email) { + return Organization{}, ErrInviteEmailMismatch + } + + if _, err := tx.Exec(ctx, ` + INSERT INTO organization_members (organization_id, user_id, role) + VALUES ($1, $2, $3) + ON CONFLICT (organization_id, user_id) DO UPDATE SET role = EXCLUDED.role, updated_at = now() + `, orgID, input.UserID, role); err != nil { + return Organization{}, fmt.Errorf("insert membership: %w", err) + } + + if _, err := tx.Exec(ctx, ` + UPDATE organization_invites + SET accepted_at = now(), + accepted_by_user_id = $1, + updated_at = now() + WHERE id = $2 + `, input.UserID, inviteID); err != nil { + return Organization{}, fmt.Errorf("mark invite accepted: %w", err) + } + + var org Organization + if err := tx.QueryRow(ctx, ` + SELECT o.id::text, o.name, o.slug, o.kind, om.role + FROM organizations o + INNER JOIN organization_members om ON om.organization_id = o.id + WHERE o.id = $1 AND om.user_id = $2 + `, orgID, input.UserID).Scan(&org.ID, &org.Name, &org.Slug, &org.Kind, &org.Role); err != nil { + return Organization{}, fmt.Errorf("load organization: %w", err) + } + + if err := tx.Commit(ctx); err != nil { + return Organization{}, fmt.Errorf("commit accept invite: %w", err) + } + + _ = s.audit.Record(ctx, audit.Event{ + OrganizationID: orgID, + UserID: input.UserID, + Action: "organization_invite_accepted", + Data: map[string]any{"email": normalizeEmail(email), "role": role}, + }) + + return org, nil +} + +func (s *Service) UpdateMemberRole(ctx context.Context, input UpdateMemberRoleInput) error { + role := 
strings.ToLower(strings.TrimSpace(input.Role)) + if role != "owner" && role != "admin" && role != "member" { + return fmt.Errorf("invalid role") + } + + var orgKind string + if err := s.db.QueryRow(ctx, `SELECT kind FROM organizations WHERE id = $1`, input.OrganizationID).Scan(&orgKind); err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return ErrNotFound + } + return fmt.Errorf("load organization kind: %w", err) + } + if orgKind != "team" { + return ErrInvalidOrganization + } + + ct, err := s.db.Exec(ctx, ` + UPDATE organization_members + SET role = $1, updated_at = now() + WHERE organization_id = $2 AND user_id = $3 + `, role, input.OrganizationID, input.UserID) + if err != nil { + return fmt.Errorf("update member role: %w", err) + } + if ct.RowsAffected() == 0 { + return ErrNotFound + } + return nil +} + +func (s *Service) RemoveMember(ctx context.Context, input RemoveMemberInput) error { + var orgKind string + if err := s.db.QueryRow(ctx, `SELECT kind FROM organizations WHERE id = $1`, input.OrganizationID).Scan(&orgKind); err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return ErrNotFound + } + return fmt.Errorf("load organization kind: %w", err) + } + if orgKind != "team" { + return ErrInvalidOrganization + } + + ct, err := s.db.Exec(ctx, ` + DELETE FROM organization_members + WHERE organization_id = $1 AND user_id = $2 + `, input.OrganizationID, input.UserID) + if err != nil { + return fmt.Errorf("delete member: %w", err) + } + if ct.RowsAffected() == 0 { + return ErrNotFound + } + return nil +} + +func newToken(bytes int) (string, error) { + buf := make([]byte, bytes) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return hex.EncodeToString(buf), nil +} + +var slugAllowed = regexp.MustCompile(`[^a-z0-9-]+`) + +func slugify(value string) string { + trimmed := strings.ToLower(strings.TrimSpace(value)) + if trimmed == "" { + return "" + } + + trimmed = strings.ReplaceAll(trimmed, "_", "-") + trimmed = strings.ReplaceAll(trimmed, " ", 
"-") + trimmed = slugAllowed.ReplaceAllString(trimmed, "") + trimmed = strings.Trim(trimmed, "-") + for strings.Contains(trimmed, "--") { + trimmed = strings.ReplaceAll(trimmed, "--", "-") + } + return trimmed +} + +func randomSuffix() string { + token, err := newToken(3) + if err != nil { + return "alt" + } + return token +} + +func normalizeEmail(email string) string { + trimmed := strings.TrimSpace(strings.ToLower(email)) + if strings.Contains(trimmed, " ") { + return "" + } + if !strings.Contains(trimmed, "@") { + return "" + } + return trimmed +} + +func isUniqueViolation(err error) bool { + if err == nil { + return false + } + msg := err.Error() + return strings.Contains(msg, "duplicate key value") || strings.Contains(msg, "unique constraint") +} diff --git a/backend/migrations/0005_org_invites.down.sql b/backend/migrations/0005_org_invites.down.sql new file mode 100644 index 0000000..192670a --- /dev/null +++ b/backend/migrations/0005_org_invites.down.sql @@ -0,0 +1,8 @@ +DROP INDEX IF EXISTS uq_organization_invites_active; +DROP INDEX IF EXISTS idx_organization_invites_org_id; + +ALTER TABLE organization_invites + DROP CONSTRAINT IF EXISTS organization_invites_role_check; + +DROP TABLE IF EXISTS organization_invites; + diff --git a/backend/migrations/0005_org_invites.up.sql b/backend/migrations/0005_org_invites.up.sql new file mode 100644 index 0000000..035f317 --- /dev/null +++ b/backend/migrations/0005_org_invites.up.sql @@ -0,0 +1,24 @@ +-- Organization invites for team workspaces (email-based). 
+ +CREATE TABLE IF NOT EXISTS organization_invites ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + organization_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE, + email TEXT NOT NULL, + role TEXT NOT NULL DEFAULT 'member', + token TEXT NOT NULL UNIQUE, + invited_by_user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + accepted_at TIMESTAMPTZ, + accepted_by_user_id UUID REFERENCES users(id) ON DELETE SET NULL +); + +ALTER TABLE organization_invites + ADD CONSTRAINT organization_invites_role_check CHECK (role IN ('admin', 'member')); + +CREATE INDEX IF NOT EXISTS idx_organization_invites_org_id ON organization_invites(organization_id); + +-- One outstanding invite per org+email (case-insensitive). +CREATE UNIQUE INDEX IF NOT EXISTS uq_organization_invites_active +ON organization_invites(organization_id, lower(email)) +WHERE accepted_at IS NULL; diff --git a/docs/README.md b/docs/README.md index 0614b27..85466f8 100644 --- a/docs/README.md +++ b/docs/README.md @@ -40,6 +40,8 @@ This directory contains implementation playbooks for contributors and AI agents. - Disk and S3/R2 upload configurations. - [Audit Logs](operations/audit-logs.md) - Audit events table and API. +- [Organization Management](operations/organization-management.md) + - Team organizations, invites, membership roles, and org selection. - [Observability (OpenTelemetry)](operations/observability.md) - Local tracing collector and production export configuration. 
- [Product Analytics (PostHog)](operations/product-analytics.md) diff --git a/docs/architecture/multi-tenant-model.md b/docs/architecture/multi-tenant-model.md index cf78386..9425db5 100644 --- a/docs/architecture/multi-tenant-model.md +++ b/docs/architecture/multi-tenant-model.md @@ -61,6 +61,7 @@ Current API enforcement: - Billing endpoints require `admin` or higher: `POST /api/v1/billing/checkout-session`, `POST /api/v1/billing/portal-session`. - Audit events require `admin` or higher: `GET /api/v1/audit/events`. +- Organization member management requires `admin`+ (list/invite) and `owner` (role changes/removals). ## API scoping conventions diff --git a/docs/operations/organization-management.md b/docs/operations/organization-management.md new file mode 100644 index 0000000..5bed7c7 --- /dev/null +++ b/docs/operations/organization-management.md @@ -0,0 +1,36 @@ +# Organization Management + +This template uses app-owned organizations (workspaces) and membership roles stored in Postgres. + +Clerk is used for authentication only; organization context and RBAC are enforced by the API using app-owned tables. + +## Concepts + +- Personal workspace: created automatically on first sign-in (`kind = 'personal'`), enforced single-member owner-only. +- Team organization: created by a signed-in user (`kind = 'team'`), supports multiple members and roles. +- Role hierarchy: `owner` > `admin` > `member`. + +## API endpoints + +All endpoints require a Clerk bearer token (`Authorization: Bearer ...`). + +Organization context is selected via `X-Organization-ID: <organization-uuid>` for org-scoped endpoints. + +- `GET /api/v1/orgs`: list organizations the user belongs to (includes role + kind). +- `POST /api/v1/orgs`: create a new team organization. +- `GET /api/v1/org/members`: list members for the active org (admin+). +- `POST /api/v1/org/invites`: create an invite for the active org (admin+, team orgs only). 
+- `POST /api/v1/org/invites/accept`: accept an invite token (email must match the signed-in user). +- `PATCH /api/v1/org/members/{userId}`: change a member role (owner-only, team orgs only). +- `DELETE /api/v1/org/members/{userId}`: remove a member (owner-only, team orgs only). + +## Invite flow + +1. Owner/admin creates an invite for a team org via `POST /api/v1/org/invites`. +2. The API returns an `acceptUrl` pointing at `GET /app/invite?token=...` and (if the worker is enabled) enqueues an email job to deliver the link. +3. The invited user signs in, opens the link, and the UI calls `POST /api/v1/org/invites/accept`. + +## Active organization selection (frontend) + +The frontend stores the active org UUID in `localStorage` under `activeOrganizationId` and sends it as `X-Organization-ID`. + diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md index 0f1444a..328e583 100644 --- a/docs/operations/production-setup-checklist.md +++ b/docs/operations/production-setup-checklist.md @@ -91,6 +91,7 @@ Apply migrations in order against Render Postgres before using auth/billing/file - `backend/migrations/0002_jobs_audit_files.up.sql` - `backend/migrations/0003_personal_workspaces.up.sql` - `backend/migrations/0004_team_owner_enforcement.up.sql` +- `backend/migrations/0005_org_invites.up.sql` ## 2) Deploy frontend (Vercel) diff --git a/frontend/app/app/dashboard-client.tsx b/frontend/app/app/dashboard-client.tsx index 7323dc4..a963be2 100644 --- a/frontend/app/app/dashboard-client.tsx +++ b/frontend/app/app/dashboard-client.tsx @@ -4,12 +4,18 @@ import { UserButton, useAuth } from "@clerk/nextjs"; import { useEffect, useMemo, useState } from "react"; import { completeFileUpload, + createOrganization, + createOrganizationInvite, createBillingPortalSession, createFileUploadURL, fetchAuditEvents, + fetchOrganizationMembers, + fetchOrganizations, fetchViewer, getFileDownloadURL, type AuditEventRecord, + type 
OrganizationSummary, + type OrganizationMembersResponse, type ViewerResponse } from "@/lib/api"; import { createAnalyticsClient } from "@/lib/integrations/analytics"; @@ -18,15 +24,38 @@ import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; type LoadState = "idle" | "loading" | "error"; +function roleRank(role: string): number { + switch ((role ?? "").toLowerCase()) { + case "owner": + return 3; + case "admin": + return 2; + case "member": + return 1; + default: + return 0; + } +} + export function DashboardClient() { - const { isLoaded, getToken, orgId, userId } = useAuth(); + const { isLoaded, getToken, userId } = useAuth(); const [viewer, setViewer] = useState(null); + const [organizations, setOrganizations] = useState([]); + const [activeOrgId, setActiveOrgId] = useState(null); const [state, setState] = useState("idle"); const [portalLoading, setPortalLoading] = useState(false); const [auditEvents, setAuditEvents] = useState([]); + const [members, setMembers] = useState([]); + const [membersState, setMembersState] = useState("idle"); const [uploadFile, setUploadFile] = useState(null); const [uploading, setUploading] = useState(false); const [lastUploadedFileId, setLastUploadedFileId] = useState(null); + const [newOrgName, setNewOrgName] = useState(""); + const [creatingOrg, setCreatingOrg] = useState(false); + const [inviteEmail, setInviteEmail] = useState(""); + const [inviteRole, setInviteRole] = useState<"member" | "admin">("member"); + const [inviteLink, setInviteLink] = useState(null); + const [inviteLoading, setInviteLoading] = useState(false); const analytics = useMemo( () => createAnalyticsClient((process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? 
"console") as "console" | "posthog" | "none"), [] @@ -34,6 +63,44 @@ export function DashboardClient() { const hasClerk = useMemo(() => Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY), []); + useEffect(() => { + try { + const stored = window.localStorage.getItem("activeOrganizationId"); + if (stored) { + setActiveOrgId(stored); + } + } catch { + // ignore + } + }, []); + + useEffect(() => { + let cancelled = false; + + async function loadOrganizations() { + if (!hasClerk) return; + if (!isLoaded || !userId) return; + const token = await getToken(); + if (!token) return; + + const data = await fetchOrganizations(token); + if (cancelled) return; + setOrganizations(data?.organizations ?? []); + } + + void loadOrganizations(); + return () => { + cancelled = true; + }; + }, [getToken, hasClerk, isLoaded, userId]); + + useEffect(() => { + if (!organizations.length || !activeOrgId) return; + if (organizations.some((o) => o.id === activeOrgId)) return; + + setActiveOrgId(organizations[0]?.id ?? null); + }, [activeOrgId, organizations]); + useEffect(() => { let cancelled = false; @@ -55,10 +122,18 @@ export function DashboardClient() { return; } - const data = await fetchViewer(token, orgId); + const data = await fetchViewer(token, activeOrgId); if (!cancelled) { setViewer(data); setState(data ? 
"idle" : "error"); + if (data?.organization?.id) { + setActiveOrgId(data.organization.id); + try { + window.localStorage.setItem("activeOrganizationId", data.organization.id); + } catch { + // ignore + } + } } } @@ -66,7 +141,7 @@ export function DashboardClient() { return () => { cancelled = true; }; - }, [getToken, hasClerk, isLoaded, orgId, userId]); + }, [activeOrgId, getToken, hasClerk, isLoaded, userId]); useEffect(() => { let cancelled = false; @@ -77,7 +152,7 @@ export function DashboardClient() { const token = await getToken(); if (!token) return; - const data = await fetchAuditEvents(token, orgId); + const data = await fetchAuditEvents(token, activeOrgId); if (!cancelled) { setAuditEvents(data?.events ?? []); } @@ -87,7 +162,41 @@ export function DashboardClient() { return () => { cancelled = true; }; - }, [getToken, hasClerk, isLoaded, orgId, userId]); + }, [activeOrgId, getToken, hasClerk, isLoaded, userId]); + + useEffect(() => { + let cancelled = false; + + async function loadMembers() { + if (!hasClerk) return; + if (!isLoaded || !userId) return; + if (!activeOrgId) return; + + setMembersState("loading"); + const token = await getToken(); + if (!token) { + if (!cancelled) setMembersState("error"); + return; + } + + const data = await fetchOrganizationMembers(token, activeOrgId); + if (cancelled) return; + + if (!data) { + setMembers([]); + setMembersState("idle"); + return; + } + + setMembers(data.members); + setMembersState("idle"); + } + + void loadMembers(); + return () => { + cancelled = true; + }; + }, [activeOrgId, getToken, hasClerk, isLoaded, userId]); const openBillingPortal = async () => { if (!hasClerk) { @@ -100,7 +209,7 @@ export function DashboardClient() { setPortalLoading(false); return; } - const session = await createBillingPortalSession({ token, organizationId: orgId }); + const session = await createBillingPortalSession({ token, organizationId: activeOrgId }); setPortalLoading(false); if (session?.url) { window.location.href = 
session.url; @@ -120,7 +229,7 @@ export function DashboardClient() { const created = await createFileUploadURL({ token, - organizationId: orgId, + organizationId: activeOrgId, filename: uploadFile.name, contentType: uploadFile.type || "application/octet-stream" }); @@ -137,7 +246,7 @@ export function DashboardClient() { method: created.method, headers: { Authorization: `Bearer ${token}`, - ...(orgId ? { "X-Organization-ID": orgId } : {}) + ...(activeOrgId ? { "X-Organization-ID": activeOrgId } : {}) }, body: form }); @@ -152,7 +261,7 @@ export function DashboardClient() { if (ok) { ok = await completeFileUpload({ token, - organizationId: orgId, + organizationId: activeOrgId, fileId: created.fileId, sizeBytes: uploadFile.size }); @@ -163,11 +272,54 @@ export function DashboardClient() { if (ok) { setLastUploadedFileId(created.fileId); analytics.track("file_uploaded", { fileId: created.fileId }); - const updated = await fetchAuditEvents(token, orgId); + const updated = await fetchAuditEvents(token, activeOrgId); setAuditEvents(updated?.events ?? []); } }; + const createTeamOrg = async () => { + if (!hasClerk) return; + if (!newOrgName.trim()) return; + const token = await getToken(); + if (!token) return; + + setCreatingOrg(true); + const created = await createOrganization({ token, name: newOrgName.trim() }); + setCreatingOrg(false); + if (!created?.organization?.id) return; + + setNewOrgName(""); + setActiveOrgId(created.organization.id); + try { + window.localStorage.setItem("activeOrganizationId", created.organization.id); + } catch { + // ignore + } + + const updated = await fetchOrganizations(token); + setOrganizations(updated?.organizations ?? 
[]); + }; + + const createInvite = async () => { + if (!hasClerk) return; + if (!activeOrgId) return; + if (!inviteEmail.trim()) return; + const token = await getToken(); + if (!token) return; + + setInviteLoading(true); + setInviteLink(null); + const resp = await createOrganizationInvite({ + token, + organizationId: activeOrgId, + email: inviteEmail.trim(), + role: inviteRole + }); + setInviteLoading(false); + if (!resp?.acceptUrl) return; + setInviteLink(resp.acceptUrl); + }; + return (
@@ -200,10 +352,121 @@ export function DashboardClient() { {hasClerk && state === "loading" &&

Loading your workspace context...

} {hasClerk && state === "error" &&

Could not load workspace context from API. Ensure backend auth and migrations are configured.

} {hasClerk && viewer && ( -
    -
  • User: {viewer.user.primaryEmail || viewer.user.id}
  • -
  • Organization: {viewer.organization.name || viewer.organization.id}
  • -
  • Role: {viewer.organization.role}
  • +
    +
      +
    • User: {viewer.user.primaryEmail || viewer.user.id}
    • +
    • + Active organization: {viewer.organization.name || viewer.organization.id}{" "} + ({viewer.organization.kind}) +
    • +
    • Role: {viewer.organization.role}
    • +
    + +
    +

    Switch workspace

    + +
    +
    + )} + + + + + + Organizations + + +

    Create a team workspace and invite members.

    +
    + setNewOrgName(e.target.value)} + /> + +
    +
    +
    + + + + Invites + + +

    Invite a teammate (team orgs only, admin+).

    +
    + setInviteEmail(e.target.value)} + /> + + +
    + {inviteLink && ( +
    +

    Invite link

    + {inviteLink} +
    + )} +
    +
    + + + + Members + + + {roleRank(viewer?.organization.role ?? "") < 2 ? ( +

    Members are visible to admins and owners.

    + ) : membersState === "loading" ? ( +

    Loading members...

    + ) : members.length === 0 ? ( +

    No members found.

    + ) : ( +
      + {members.map((m) => ( +
    • + {m.primaryEmail || m.userId}{" "} + ({m.role}) +
    • + ))}
    )}
    @@ -246,7 +509,7 @@ export function DashboardClient() { onClick={async () => { const token = await getToken(); if (!token) return; - const info = await getFileDownloadURL({ token, organizationId: orgId, fileId: lastUploadedFileId }); + const info = await getFileDownloadURL({ token, organizationId: activeOrgId, fileId: lastUploadedFileId }); if (!info) return; if (info.downloadType === "presigned") { @@ -258,7 +521,7 @@ export function DashboardClient() { method: "GET", headers: { Authorization: `Bearer ${token}`, - ...(orgId ? { "X-Organization-ID": orgId } : {}) + ...(activeOrgId ? { "X-Organization-ID": activeOrgId } : {}) } }); if (!response.ok) return; diff --git a/frontend/app/app/invite/invite-client.tsx b/frontend/app/app/invite/invite-client.tsx new file mode 100644 index 0000000..d989760 --- /dev/null +++ b/frontend/app/app/invite/invite-client.tsx @@ -0,0 +1,83 @@ +"use client"; + +import { useAuth } from "@clerk/nextjs"; +import { useRouter, useSearchParams } from "next/navigation"; +import { useEffect, useMemo, useState } from "react"; +import { acceptOrganizationInvite } from "@/lib/api"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Button } from "@/components/ui/button"; + +type State = "idle" | "accepting" | "accepted" | "error"; + +export function InviteClient() { + const { isLoaded, getToken, userId } = useAuth(); + const router = useRouter(); + const searchParams = useSearchParams(); + const inviteToken = useMemo(() => searchParams.get("token") ?? 
"", [searchParams]); + const [state, setState] = useState("idle"); + + useEffect(() => { + let cancelled = false; + + async function accept() { + if (!isLoaded || !userId) return; + if (!inviteToken) { + setState("error"); + return; + } + + setState("accepting"); + const token = await getToken(); + if (!token) { + setState("error"); + return; + } + + const accepted = await acceptOrganizationInvite({ token, inviteToken }); + if (!accepted) { + if (!cancelled) setState("error"); + return; + } + + try { + window.localStorage.setItem("activeOrganizationId", accepted.organization.id); + } catch { + // ignore + } + + if (!cancelled) { + setState("accepted"); + router.replace("/app"); + } + } + + void accept(); + return () => { + cancelled = true; + }; + }, [getToken, inviteToken, isLoaded, router, userId]); + + return ( +
    + + + Accept Invite + + + {state === "idle" &&

    Preparing to accept invite…

    } + {state === "accepting" &&

    Accepting your invite…

    } + {state === "accepted" &&

    Invite accepted. Redirecting…

    } + {state === "error" && ( +
    +

    Could not accept this invite. It may be invalid, already used, or intended for a different email.

    + +
    + )} +
    +
    +
    + ); +} + diff --git a/frontend/app/app/invite/page.tsx b/frontend/app/app/invite/page.tsx new file mode 100644 index 0000000..88980e3 --- /dev/null +++ b/frontend/app/app/invite/page.tsx @@ -0,0 +1,6 @@ +import { InviteClient } from "./invite-client"; + +export default function InvitePage() { + return ; +} + diff --git a/frontend/app/integrations-provider.tsx b/frontend/app/integrations-provider.tsx index 5aef687..8be1ddd 100644 --- a/frontend/app/integrations-provider.tsx +++ b/frontend/app/integrations-provider.tsx @@ -74,8 +74,16 @@ function IntegrationsWithClerk({ children }: PropsWithChildren) { } integrations.analytics.identify(userId); - integrations.analytics.group("organization", orgId ?? "none"); - integrations.support.identify({ userId, organizationId: orgId ?? undefined }); + const activeOrgId = (() => { + try { + return window.localStorage.getItem("activeOrganizationId"); + } catch { + return null; + } + })(); + + integrations.analytics.group("organization", activeOrgId ?? orgId ?? "none"); + integrations.support.identify({ userId, organizationId: activeOrgId ?? orgId ?? undefined }); integrations.errorReporting.setUser({ id: userId }); }, [integrations, isLoaded, orgId, userId]); diff --git a/frontend/app/pricing/pricing-client.tsx b/frontend/app/pricing/pricing-client.tsx index 0fd0800..3bb0e02 100644 --- a/frontend/app/pricing/pricing-client.tsx +++ b/frontend/app/pricing/pricing-client.tsx @@ -25,7 +25,7 @@ const PLANS = [ ] as const; export function PricingClient() { - const { getToken, orgId } = useAuth(); + const { getToken } = useAuth(); const [loadingPlan, setLoadingPlan] = useState(null); const hasClerk = Boolean(process.env.NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY); const analytics = createAnalyticsClient((process.env.NEXT_PUBLIC_ANALYTICS_PROVIDER ?? 
"console") as "console" | "posthog" | "none"); @@ -47,7 +47,13 @@ export function PricingClient() { const session = await createCheckoutSession({ token, planCode, - organizationId: orgId + organizationId: (() => { + try { + return window.localStorage.getItem("activeOrganizationId"); + } catch { + return null; + } + })() }); setLoadingPlan(null); diff --git a/frontend/lib/api.ts b/frontend/lib/api.ts index cf4fd77..53c5aa4 100644 --- a/frontend/lib/api.ts +++ b/frontend/lib/api.ts @@ -17,10 +17,32 @@ export type ViewerResponse = { id: string; name: string; slug: string; + kind: string; role: string; }; }; +export type OrganizationSummary = { + id: string; + name: string; + slug: string; + kind: string; + role: string; +}; + +export type OrganizationsResponse = { + organizations: OrganizationSummary[]; +}; + +export type OrganizationMembersResponse = { + members: Array<{ + userId: string; + primaryEmail: string; + role: string; + joinedAt: string; + }>; +}; + export type AuditEventRecord = { id: string; organizationId: string; @@ -80,6 +102,112 @@ export async function fetchViewer(token: string, organizationId?: string | null) } } +export async function fetchOrganizations(token: string): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/orgs`, { + method: "GET", + headers: buildAuthHeaders(token) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as OrganizationsResponse; + } catch { + return null; + } +} + +export async function createOrganization(params: { + token: string; + name: string; + slug?: string; +}): Promise<{ organization: OrganizationSummary } | null> { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/orgs`, { + method: "POST", + headers: { + ...buildAuthHeaders(params.token), + "Content-Type": "application/json" + }, + body: JSON.stringify({ name: params.name, slug: params.slug ?? 
"" }) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as { organization: OrganizationSummary }; + } catch { + return null; + } +} + +export async function fetchOrganizationMembers(token: string, organizationId: string): Promise { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/org/members`, { + method: "GET", + headers: buildAuthHeaders(token, organizationId) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as OrganizationMembersResponse; + } catch { + return null; + } +} + +export async function createOrganizationInvite(params: { + token: string; + organizationId: string; + email: string; + role?: string; +}): Promise<{ acceptUrl: string } | null> { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/org/invites`, { + method: "POST", + headers: { + ...buildAuthHeaders(params.token, params.organizationId), + "Content-Type": "application/json" + }, + body: JSON.stringify({ email: params.email, role: params.role ?? 
"member" }) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as { acceptUrl: string }; + } catch { + return null; + } +} + +export async function acceptOrganizationInvite(params: { token: string; inviteToken: string }): Promise<{ organization: OrganizationSummary } | null> { + try { + const response = await fetch(`${API_BASE_URL}/api/v1/org/invites/accept`, { + method: "POST", + headers: { + ...buildAuthHeaders(params.token), + "Content-Type": "application/json" + }, + body: JSON.stringify({ token: params.inviteToken }) + }); + + if (!response.ok) { + return null; + } + + return (await response.json()) as { organization: OrganizationSummary }; + } catch { + return null; + } +} + export async function fetchAuditEvents(token: string, organizationId?: string | null): Promise { try { const response = await fetch(`${API_BASE_URL}/api/v1/audit/events`, { From 2b5361c887b52d19ad4e920dabb936561e8904f6 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:32:21 +0800 Subject: [PATCH 11/23] ops: add local e2e smoke test --- Makefile | 5 +- README.md | 6 + docs/operations/agent-workflow.md | 2 + scripts/smoke-local.sh | 198 ++++++++++++++++++++++++++++++ 4 files changed, 210 insertions(+), 1 deletion(-) create mode 100644 scripts/smoke-local.sh diff --git a/Makefile b/Makefile index 6ada069..04908c5 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/sh -.PHONY: infra-up infra-down dev-api dev-ui test ci +.PHONY: infra-up infra-down dev-api dev-ui dev-worker test ci smoke-local infra-up: docker compose up -d postgres redis otel-collector @@ -17,6 +17,9 @@ dev-worker: dev-ui: cd frontend && npm run dev +smoke-local: + bash scripts/smoke-local.sh + test: cd backend && go test ./... 
cd frontend && npm run lint && npm run typecheck diff --git a/README.md b/README.md index 70b8730..0313018 100644 --- a/README.md +++ b/README.md @@ -105,6 +105,12 @@ make infra-up This starts Postgres, Redis, and a local OpenTelemetry collector (for local tracing). +Optional: run a local end-to-end smoke test (infra + api + worker + ui): + +```bash +make smoke-local +``` + Start backend in one terminal: ```bash diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index d8f9c6f..296bc4f 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -34,6 +34,8 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - `npm run lint` - `npm run typecheck` - `npm run build` for route/config changes +- Local E2E: + - `make smoke-local` (infra + api + worker + ui, plus a basic jobs processing check) - Configuration: - Validate env example files are still consistent and complete. - Validate deployment config changes reflect new variables (for example `render.yaml` for Render backend, and Vercel project env vars for frontend). diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh new file mode 100644 index 0000000..233b5af --- /dev/null +++ b/scripts/smoke-local.sh @@ -0,0 +1,198 @@ +#!/usr/bin/env bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +cd "$ROOT_DIR" + +NO_INFRA=0 +DOWN_AFTER=0 +SKIP_UI=0 +SKIP_WORKER=0 +SKIP_MIGRATIONS=0 + +usage() { + cat <<'EOF' +Local end-to-end smoke test (infra + backend + worker + frontend). 
+ +Usage: + bash scripts/smoke-local.sh [options] + +Options: + --no-infra Do not run docker compose up + --down Run docker compose down after the test + --skip-ui Do not start Next.js dev server + --skip-worker Do not start worker or test jobs + --skip-migrations Do not apply SQL migrations +EOF +} + +for arg in "${@:-}"; do + case "$arg" in + --no-infra) NO_INFRA=1 ;; + --down) DOWN_AFTER=1 ;; + --skip-ui) SKIP_UI=1 ;; + --skip-worker) SKIP_WORKER=1 ;; + --skip-migrations) SKIP_MIGRATIONS=1 ;; + -h|--help) usage; exit 0 ;; + *) echo "unknown argument: $arg" >&2; usage; exit 2 ;; + esac +done + +API_PORT="${PORT:-8080}" +API_BASE="http://localhost:${API_PORT}" +UI_BASE="http://localhost:3000" + +DATABASE_URL_DEFAULT="postgres://postgres:postgres@localhost:5432/saas_core_template?sslmode=disable" +REDIS_URL_DEFAULT="redis://localhost:6379/0" + +export DATABASE_URL="${DATABASE_URL:-$DATABASE_URL_DEFAULT}" +export REDIS_URL="${REDIS_URL:-$REDIS_URL_DEFAULT}" +export APP_BASE_URL="${APP_BASE_URL:-$UI_BASE}" +export APP_ENV="${APP_ENV:-development}" +export APP_VERSION="${APP_VERSION:-smoke}" + +export OTEL_TRACES_EXPORTER="${OTEL_TRACES_EXPORTER:-console}" +export ANALYTICS_PROVIDER="${ANALYTICS_PROVIDER:-console}" +export ERROR_REPORTING_PROVIDER="${ERROR_REPORTING_PROVIDER:-console}" +export EMAIL_PROVIDER="${EMAIL_PROVIDER:-console}" + +export FILE_STORAGE_PROVIDER="${FILE_STORAGE_PROVIDER:-disk}" +export FILE_STORAGE_DISK_PATH="${FILE_STORAGE_DISK_PATH:-./.data/uploads}" + +export JOBS_ENABLED="${JOBS_ENABLED:-true}" +export JOBS_WORKER_ID="${JOBS_WORKER_ID:-smoke}" +export JOBS_POLL_INTERVAL="${JOBS_POLL_INTERVAL:-1s}" + +API_PID="" +WORKER_PID="" +UI_PID="" + +cleanup() { + set +e + if [[ -n "${UI_PID}" ]] && kill -0 "${UI_PID}" 2>/dev/null; then + kill "${UI_PID}" 2>/dev/null || true + wait "${UI_PID}" 2>/dev/null || true + fi + if [[ -n "${WORKER_PID}" ]] && kill -0 "${WORKER_PID}" 2>/dev/null; then + kill "${WORKER_PID}" 2>/dev/null || true + wait 
"${WORKER_PID}" 2>/dev/null || true + fi + if [[ -n "${API_PID}" ]] && kill -0 "${API_PID}" 2>/dev/null; then + kill "${API_PID}" 2>/dev/null || true + wait "${API_PID}" 2>/dev/null || true + fi + + if [[ "${DOWN_AFTER}" == "1" ]]; then + docker compose down >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +wait_for() { + local name="$1" + local cmd="$2" + local timeout_seconds="${3:-60}" + local start + start="$(date +%s)" + + while true; do + if eval "${cmd}" >/dev/null 2>&1; then + echo "ok: ${name}" + return 0 + fi + + local now + now="$(date +%s)" + if (( now - start > timeout_seconds )); then + echo "timeout waiting for ${name}" >&2 + return 1 + fi + sleep 1 + done +} + +wait_http_ok() { + local name="$1" + local url="$2" + local timeout_seconds="${3:-60}" + wait_for "${name}" "curl -fsS \"${url}\"" "${timeout_seconds}" +} + +require_bin() { + local bin="$1" + if ! command -v "${bin}" >/dev/null 2>&1; then + echo "missing required binary: ${bin}" >&2 + exit 1 + fi +} + +require_bin curl +require_bin docker + +if [[ "${NO_INFRA}" == "0" ]]; then + echo "==> starting infra (docker compose)" + docker compose up -d postgres redis otel-collector + + wait_for "postgres" "docker compose exec -T postgres pg_isready -U postgres -d saas_core_template" 90 + wait_for "redis" "docker compose exec -T redis redis-cli ping | grep -q PONG" 90 +fi + +if [[ "${SKIP_MIGRATIONS}" == "0" ]]; then + echo "==> applying migrations (inside postgres container)" + for f in backend/migrations/*.up.sql; do + echo " - ${f}" + cat "${f}" | docker compose exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d saas_core_template >/dev/null + done +fi + +echo "==> starting api" +( + cd backend + PORT="${API_PORT}" go run ./cmd/api +) & +API_PID="$!" 
+ +wait_http_ok "api /healthz" "${API_BASE}/healthz" 60 +wait_http_ok "api /readyz" "${API_BASE}/readyz" 60 +wait_http_ok "api /api/v1/meta" "${API_BASE}/api/v1/meta" 60 + +if [[ "${SKIP_WORKER}" == "0" ]]; then + echo "==> starting worker" + ( + cd backend + go run ./cmd/worker + ) & + WORKER_PID="$!" + + echo "==> testing jobs (enqueue -> worker processes -> done)" + JOB_ID="$( + docker compose exec -T postgres psql -qtA -U postgres -d saas_core_template -v ON_ERROR_STOP=1 -c \ + "INSERT INTO jobs (type, payload, status, run_at) VALUES ('send_email', '{\"to\":\"smoke@example.com\",\"subject\":\"Smoke test\",\"text\":\"Hello from smoke test.\"}'::jsonb, 'queued', now()) RETURNING id::text;" + )" + JOB_ID="$(echo "${JOB_ID}" | tr -d '[:space:]')" + if [[ -z "${JOB_ID}" ]]; then + echo "failed to enqueue job" >&2 + exit 1 + fi + + wait_for "job ${JOB_ID} done" "docker compose exec -T postgres psql -qtA -U postgres -d saas_core_template -c \"SELECT status FROM jobs WHERE id = '${JOB_ID}'\" | tr -d '[:space:]' | grep -q '^done$'" 30 +fi + +if [[ "${SKIP_UI}" == "0" ]]; then + echo "==> starting ui" + ( + cd frontend + NEXT_PUBLIC_API_URL="${API_BASE}" npm run dev + ) & + UI_PID="$!" 
+ + wait_http_ok "ui /" "${UI_BASE}/" 90 + wait_http_ok "ui /pricing" "${UI_BASE}/pricing" 90 +fi + +echo "==> smoke test passed" +echo "API: ${API_BASE}" +if [[ "${SKIP_UI}" == "0" ]]; then + echo "UI: ${UI_BASE}" +fi + From ac96c7e1e1d6cfd5d5d789de52f0457537738bb6 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:34:28 +0800 Subject: [PATCH 12/23] smoke: fix empty-args parsing --- scripts/smoke-local.sh | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 233b5af..7e97ba0 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -26,7 +26,7 @@ Options: EOF } -for arg in "${@:-}"; do +for arg in "$@"; do case "$arg" in --no-infra) NO_INFRA=1 ;; --down) DOWN_AFTER=1 ;; @@ -129,6 +129,19 @@ require_bin() { require_bin curl require_bin docker +if [[ "${SKIP_UI}" == "0" ]]; then + if command -v node >/dev/null 2>&1; then + NODE_VERSION="$(node -v | tr -d 'v' || true)" + if [[ "${NODE_VERSION}" == 19.* ]]; then + NODE_MINOR="$(echo "${NODE_VERSION}" | cut -d. -f2)" + if [[ "${NODE_MINOR}" -lt 8 ]]; then + echo "Node.js ${NODE_VERSION} is too old for Next.js; use Node 20+ (or run with --skip-ui)." 
>&2 + exit 1 + fi + fi + fi +fi + if [[ "${NO_INFRA}" == "0" ]]; then echo "==> starting infra (docker compose)" docker compose up -d postgres redis otel-collector @@ -195,4 +208,3 @@ echo "API: ${API_BASE}" if [[ "${SKIP_UI}" == "0" ]]; then echo "UI: ${UI_BASE}" fi - From c48c45d73f33b4f2ed0120a65743bc1e576ffbea Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:37:58 +0800 Subject: [PATCH 13/23] telemetry: fix resource schema conflict --- backend/internal/telemetry/telemetry.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/internal/telemetry/telemetry.go b/backend/internal/telemetry/telemetry.go index 230cd1d..a3442e2 100644 --- a/backend/internal/telemetry/telemetry.go +++ b/backend/internal/telemetry/telemetry.go @@ -32,10 +32,11 @@ func Init(ctx context.Context, cfg Config) (ShutdownFunc, error) { serviceName = "backend" } + baseRes := resource.Default() res, err := resource.Merge( - resource.Default(), + baseRes, resource.NewWithAttributes( - semconv.SchemaURL, + baseRes.SchemaURL(), semconv.ServiceName(serviceName), semconv.ServiceVersion(strings.TrimSpace(cfg.Version)), semconv.DeploymentEnvironment(strings.TrimSpace(cfg.Environment)), From e23310189604e581665ace6c581b58196df3f20c Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:43:23 +0800 Subject: [PATCH 14/23] smoke: use isolated db per run --- Makefile | 2 +- README.md | 8 ++++++++ scripts/smoke-local.sh | 24 ++++++++++++++++++++---- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 04908c5..206983b 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ dev-ui: cd frontend && npm run dev smoke-local: - bash scripts/smoke-local.sh + bash scripts/smoke-local.sh $(SMOKE_ARGS) test: cd backend && go test ./... 
diff --git a/README.md b/README.md index 0313018..639d633 100644 --- a/README.md +++ b/README.md @@ -111,6 +111,14 @@ Optional: run a local end-to-end smoke test (infra + api + worker + ui): make smoke-local ``` +If your local Node version can't run Next.js, skip the UI step: + +```bash +make smoke-local SMOKE_ARGS=--skip-ui +``` + +Smoke test uses a separate Postgres database (default `saas_core_template_smoke`) and recreates it each run. Override with `SMOKE_DB_NAME=`. + Start backend in one terminal: ```bash diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 7e97ba0..0002a39 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -42,7 +42,13 @@ API_PORT="${PORT:-8080}" API_BASE="http://localhost:${API_PORT}" UI_BASE="http://localhost:3000" -DATABASE_URL_DEFAULT="postgres://postgres:postgres@localhost:5432/saas_core_template?sslmode=disable" +SMOKE_DB_NAME="${SMOKE_DB_NAME:-saas_core_template_smoke}" +if [[ ! "${SMOKE_DB_NAME}" =~ ^[a-zA-Z0-9_]+$ ]]; then + echo "invalid SMOKE_DB_NAME (expected [a-zA-Z0-9_]+): ${SMOKE_DB_NAME}" >&2 + exit 2 +fi + +DATABASE_URL_DEFAULT="postgres://postgres:postgres@localhost:5432/${SMOKE_DB_NAME}?sslmode=disable" REDIS_URL_DEFAULT="redis://localhost:6379/0" export DATABASE_URL="${DATABASE_URL:-$DATABASE_URL_DEFAULT}" @@ -150,11 +156,21 @@ if [[ "${NO_INFRA}" == "0" ]]; then wait_for "redis" "docker compose exec -T redis redis-cli ping | grep -q PONG" 90 fi +echo "==> preparing smoke database (${SMOKE_DB_NAME})" +docker compose exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d postgres >/dev/null < pg_backend_pid(); +DROP DATABASE IF EXISTS ${SMOKE_DB_NAME}; +CREATE DATABASE ${SMOKE_DB_NAME}; +SQL + if [[ "${SKIP_MIGRATIONS}" == "0" ]]; then echo "==> applying migrations (inside postgres container)" for f in backend/migrations/*.up.sql; do echo " - ${f}" - cat "${f}" | docker compose exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d saas_core_template >/dev/null + cat "${f}" | docker compose 
exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d "${SMOKE_DB_NAME}" >/dev/null done fi @@ -179,7 +195,7 @@ if [[ "${SKIP_WORKER}" == "0" ]]; then echo "==> testing jobs (enqueue -> worker processes -> done)" JOB_ID="$( - docker compose exec -T postgres psql -qtA -U postgres -d saas_core_template -v ON_ERROR_STOP=1 -c \ + docker compose exec -T postgres psql -qtA -U postgres -d "${SMOKE_DB_NAME}" -v ON_ERROR_STOP=1 -c \ "INSERT INTO jobs (type, payload, status, run_at) VALUES ('send_email', '{\"to\":\"smoke@example.com\",\"subject\":\"Smoke test\",\"text\":\"Hello from smoke test.\"}'::jsonb, 'queued', now()) RETURNING id::text;" )" JOB_ID="$(echo "${JOB_ID}" | tr -d '[:space:]')" @@ -188,7 +204,7 @@ if [[ "${SKIP_WORKER}" == "0" ]]; then exit 1 fi - wait_for "job ${JOB_ID} done" "docker compose exec -T postgres psql -qtA -U postgres -d saas_core_template -c \"SELECT status FROM jobs WHERE id = '${JOB_ID}'\" | tr -d '[:space:]' | grep -q '^done$'" 30 + wait_for "job ${JOB_ID} done" "docker compose exec -T postgres psql -qtA -U postgres -d \"${SMOKE_DB_NAME}\" -c \"SELECT status FROM jobs WHERE id = '${JOB_ID}'\" | tr -d '[:space:]' | grep -q '^done$'" 30 fi if [[ "${SKIP_UI}" == "0" ]]; then From b6c47a4f32fe2532401b2ff66e4eb1bb26b1534e Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:49:58 +0800 Subject: [PATCH 15/23] db: add migrate CLI --- Makefile | 8 +- README.md | 11 +- backend/cmd/migrate/main.go | 243 ++++++++++++++++++ docs/operations/agent-workflow.md | 3 + docs/operations/production-setup-checklist.md | 11 +- scripts/smoke-local.sh | 10 +- 6 files changed, 277 insertions(+), 9 deletions(-) create mode 100644 backend/cmd/migrate/main.go diff --git a/Makefile b/Makefile index 206983b..027b5f8 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ SHELL := /bin/sh -.PHONY: infra-up infra-down dev-api dev-ui dev-worker test ci smoke-local +.PHONY: infra-up infra-down dev-api dev-ui 
dev-worker migrate-up migrate-status test ci smoke-local infra-up: docker compose up -d postgres redis otel-collector @@ -17,6 +17,12 @@ dev-worker: dev-ui: cd frontend && npm run dev +migrate-up: + cd backend && go run ./cmd/migrate up + +migrate-status: + cd backend && go run ./cmd/migrate status + smoke-local: bash scripts/smoke-local.sh $(SMOKE_ARGS) diff --git a/README.md b/README.md index 639d633..b4a9b96 100644 --- a/README.md +++ b/README.md @@ -87,8 +87,15 @@ Core variables: SQL migrations live in `backend/migrations/`. -Apply them with your preferred migration tool before using auth/billing endpoints. -Initial migration files: +Apply them before using auth/billing endpoints. + +Recommended: run the built-in migration CLI (tracks applied migrations in `schema_migrations`): + +```bash +make migrate-up +``` + +Initial migration files (applied in order): - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` diff --git a/backend/cmd/migrate/main.go b/backend/cmd/migrate/main.go new file mode 100644 index 0000000..ac22759 --- /dev/null +++ b/backend/cmd/migrate/main.go @@ -0,0 +1,243 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/jackc/pgx/v5" +) + +func main() { + ctx := context.Background() + + if len(os.Args) < 2 { + usage() + os.Exit(2) + } + + cmd := strings.TrimSpace(os.Args[1]) + switch cmd { + case "up": + fs := flag.NewFlagSet("up", flag.ExitOnError) + dir := fs.String("dir", "./migrations", "migrations directory containing *.up.sql") + _ = fs.Parse(os.Args[2:]) + + databaseURL := strings.TrimSpace(os.Getenv("DATABASE_URL")) + if databaseURL == "" { + fatalf("DATABASE_URL is required") + } + + conn, err := pgx.Connect(ctx, databaseURL) + if err != nil { + fatalf("connect: %v", err) + } + defer conn.Close(ctx) + + if err := ensureSchemaMigrations(ctx, conn); err != nil { + fatalf("ensure schema_migrations: %v", err) 
+ } + + files, err := listMigrationFiles(*dir, ".up.sql") + if err != nil { + fatalf("list migrations: %v", err) + } + + appliedCount := 0 + for _, path := range files { + filename := filepath.Base(path) + applied, err := isApplied(ctx, conn, filename) + if err != nil { + fatalf("check applied %s: %v", filename, err) + } + if applied { + continue + } + + sqlBytes, err := os.ReadFile(path) + if err != nil { + fatalf("read %s: %v", filename, err) + } + + if err := applyMigration(ctx, conn, filename, string(sqlBytes)); err != nil { + fatalf("apply %s: %v", filename, err) + } + + fmt.Printf("applied %s\n", filename) + appliedCount++ + } + + fmt.Printf("done (%d applied)\n", appliedCount) + case "status": + fs := flag.NewFlagSet("status", flag.ExitOnError) + dir := fs.String("dir", "./migrations", "migrations directory containing *.up.sql") + _ = fs.Parse(os.Args[2:]) + + databaseURL := strings.TrimSpace(os.Getenv("DATABASE_URL")) + if databaseURL == "" { + fatalf("DATABASE_URL is required") + } + + conn, err := pgx.Connect(ctx, databaseURL) + if err != nil { + fatalf("connect: %v", err) + } + defer conn.Close(ctx) + + if err := ensureSchemaMigrations(ctx, conn); err != nil { + fatalf("ensure schema_migrations: %v", err) + } + + files, err := listMigrationFiles(*dir, ".up.sql") + if err != nil { + fatalf("list migrations: %v", err) + } + + applied, err := listApplied(ctx, conn) + if err != nil { + fatalf("list applied: %v", err) + } + + appliedNames := mapKeys(applied) + fmt.Printf("applied migrations (%d):\n", len(appliedNames)) + for _, name := range appliedNames { + fmt.Printf(" - %s\n", name) + } + + pendingCount := 0 + for _, path := range files { + name := filepath.Base(path) + if applied[name] { + continue + } + pendingCount++ + } + + fmt.Printf("pending migrations (%d):\n", pendingCount) + for _, path := range files { + name := filepath.Base(path) + if applied[name] { + continue + } + fmt.Printf(" - %s\n", name) + } + default: + usage() + os.Exit(2) + } +} + 
+func usage() { + fmt.Fprintln(os.Stderr, "Usage:") + fmt.Fprintln(os.Stderr, " go run ./cmd/migrate up [-dir ./migrations]") + fmt.Fprintln(os.Stderr, " go run ./cmd/migrate status [-dir ./migrations]") +} + +func fatalf(format string, args ...any) { + fmt.Fprintf(os.Stderr, format+"\n", args...) + os.Exit(1) +} + +func ensureSchemaMigrations(ctx context.Context, conn *pgx.Conn) error { + _, err := conn.Exec(ctx, ` + CREATE TABLE IF NOT EXISTS schema_migrations ( + filename TEXT PRIMARY KEY, + applied_at TIMESTAMPTZ NOT NULL DEFAULT now() + ) + `) + return err +} + +func listMigrationFiles(dir string, suffix string) ([]string, error) { + glob := filepath.Join(dir, "*"+suffix) + matches, err := filepath.Glob(glob) + if err != nil { + return nil, err + } + sort.Strings(matches) + return matches, nil +} + +func isApplied(ctx context.Context, conn *pgx.Conn, filename string) (bool, error) { + var exists bool + if err := conn.QueryRow(ctx, `SELECT EXISTS (SELECT 1 FROM schema_migrations WHERE filename = $1)`, filename).Scan(&exists); err != nil { + return false, err + } + return exists, nil +} + +func listApplied(ctx context.Context, conn *pgx.Conn) (map[string]bool, error) { + rows, err := conn.Query(ctx, `SELECT filename FROM schema_migrations ORDER BY filename ASC`) + if err != nil { + return nil, err + } + defer rows.Close() + + out := map[string]bool{} + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + out[name] = true + } + return out, rows.Err() +} + +func applyMigration(ctx context.Context, conn *pgx.Conn, filename string, sql string) error { + sql = strings.TrimSpace(sql) + if sql == "" { + return fmt.Errorf("empty migration") + } + + wrapped := strings.Builder{} + wrapped.WriteString("BEGIN;\n") + wrapped.WriteString(sql) + if !strings.HasSuffix(sql, ";") { + wrapped.WriteString(";\n") + } else { + wrapped.WriteString("\n") + } + wrapped.WriteString("INSERT INTO schema_migrations (filename, applied_at) 
VALUES ('") + wrapped.WriteString(strings.ReplaceAll(filename, "'", "''")) + wrapped.WriteString("', now()) ON CONFLICT (filename) DO NOTHING;\n") + wrapped.WriteString("COMMIT;\n") + + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + + err := execMulti(ctx, conn, wrapped.String()) + if err == nil { + return nil + } + + _ = execMulti(context.Background(), conn, "ROLLBACK;") + return err +} + +func execMulti(ctx context.Context, conn *pgx.Conn, sql string) error { + results, err := conn.PgConn().Exec(ctx, sql).ReadAll() + if err != nil { + return err + } + for _, res := range results { + if res.Err != nil { + return res.Err + } + } + return nil +} + +func mapKeys(m map[string]bool) []string { + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + sort.Strings(out) + return out +} diff --git a/docs/operations/agent-workflow.md b/docs/operations/agent-workflow.md index 296bc4f..f40b2ce 100644 --- a/docs/operations/agent-workflow.md +++ b/docs/operations/agent-workflow.md @@ -30,6 +30,9 @@ This runbook defines the standard operating flow for AI-agent-assisted developme - Backend: - `gofmt` on changed files - `go test ./...` +- Migrations: + - `make migrate-status` (optional) + - `make migrate-up` (when migrations change) - Frontend: - `npm run lint` - `npm run typecheck` diff --git a/docs/operations/production-setup-checklist.md b/docs/operations/production-setup-checklist.md index 328e583..ad7ec5f 100644 --- a/docs/operations/production-setup-checklist.md +++ b/docs/operations/production-setup-checklist.md @@ -85,7 +85,16 @@ The worker service runs background jobs (emails, future async tasks). Configure: ### Database migration (Render Postgres) -Apply migrations in order against Render Postgres before using auth/billing/files endpoints: +Apply migrations against Render Postgres before using auth/billing/files endpoints. 
+ +Recommended (tracks applied migrations in `schema_migrations`): + +```bash +cd backend +DATABASE_URL="" go run ./cmd/migrate up -dir ./migrations +``` + +Migrations (applied in order): - `backend/migrations/0001_identity_tenancy_billing.up.sql` - `backend/migrations/0002_jobs_audit_files.up.sql` diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 0002a39..622973d 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -167,11 +167,11 @@ CREATE DATABASE ${SMOKE_DB_NAME}; SQL if [[ "${SKIP_MIGRATIONS}" == "0" ]]; then - echo "==> applying migrations (inside postgres container)" - for f in backend/migrations/*.up.sql; do - echo " - ${f}" - cat "${f}" | docker compose exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d "${SMOKE_DB_NAME}" >/dev/null - done + echo "==> applying migrations" + ( + cd backend + DATABASE_URL="${DATABASE_URL}" go run ./cmd/migrate up -dir ./migrations + ) fi echo "==> starting api" From a170a73957970eeae3b8bb91eec4c03c4fd275b0 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 00:53:44 +0800 Subject: [PATCH 16/23] smoke: wait for postgres stability --- scripts/smoke-local.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 622973d..553f99d 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -152,11 +152,13 @@ if [[ "${NO_INFRA}" == "0" ]]; then echo "==> starting infra (docker compose)" docker compose up -d postgres redis otel-collector - wait_for "postgres" "docker compose exec -T postgres pg_isready -U postgres -d saas_core_template" 90 + wait_for "postgres" "docker compose exec -T postgres pg_isready -U postgres -d postgres" 90 + wait_for "postgres sql" "docker compose exec -T postgres psql -qtA -U postgres -d postgres -c 'SELECT 1' | tr -d '[:space:]' | grep -q '^1$'" 90 wait_for "redis" "docker compose exec -T redis redis-cli ping | grep -q PONG" 90 fi 
echo "==> preparing smoke database (${SMOKE_DB_NAME})" +wait_for "postgres ready for ddl" "docker compose exec -T postgres psql -qtA -U postgres -d postgres -c 'SELECT 1' | tr -d '[:space:]' | grep -q '^1$'" 60 docker compose exec -T postgres psql -v ON_ERROR_STOP=1 -U postgres -d postgres >/dev/null < Date: Mon, 23 Feb 2026 01:41:20 +0800 Subject: [PATCH 17/23] smoke: fix port collisions and cleanup --- scripts/smoke-local.sh | 122 ++++++++++++++++++++++++++++++++++------- 1 file changed, 101 insertions(+), 21 deletions(-) diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 553f99d..0f73798 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash set -euo pipefail +if [[ -z "${BASH_VERSION:-}" ]]; then + exec bash "$0" "$@" +fi + ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" cd "$ROOT_DIR" @@ -39,7 +43,7 @@ for arg in "$@"; do done API_PORT="${PORT:-8080}" -API_BASE="http://localhost:${API_PORT}" +API_BASE="" UI_BASE="http://localhost:3000" SMOKE_DB_NAME="${SMOKE_DB_NAME:-saas_core_template_smoke}" @@ -48,6 +52,37 @@ if [[ ! 
"${SMOKE_DB_NAME}" =~ ^[a-zA-Z0-9_]+$ ]]; then exit 2 fi +port_is_open() { + local port="$1" + (echo >/dev/tcp/127.0.0.1/"${port}") >/dev/null 2>&1 +} + +pick_free_port() { + local base_port="$1" + local max_tries="${2:-25}" + local port="${base_port}" + local i=0 + + while (( i < max_tries )); do + if port_is_open "${port}"; then + port=$((port + 1)) + i=$((i + 1)) + continue + fi + echo "${port}" + return 0 + done + + return 1 +} + +API_PORT="$(pick_free_port "${API_PORT}" 25 || true)" +if [[ -z "${API_PORT}" ]]; then + echo "could not find a free port starting from ${PORT:-8080}" >&2 + exit 1 +fi +API_BASE="http://localhost:${API_PORT}" + DATABASE_URL_DEFAULT="postgres://postgres:postgres@localhost:5432/${SMOKE_DB_NAME}?sslmode=disable" REDIS_URL_DEFAULT="redis://localhost:6379/0" @@ -57,7 +92,7 @@ export APP_BASE_URL="${APP_BASE_URL:-$UI_BASE}" export APP_ENV="${APP_ENV:-development}" export APP_VERSION="${APP_VERSION:-smoke}" -export OTEL_TRACES_EXPORTER="${OTEL_TRACES_EXPORTER:-console}" +export OTEL_TRACES_EXPORTER="${OTEL_TRACES_EXPORTER:-none}" export ANALYTICS_PROVIDER="${ANALYTICS_PROVIDER:-console}" export ERROR_REPORTING_PROVIDER="${ERROR_REPORTING_PROVIDER:-console}" export EMAIL_PROVIDER="${EMAIL_PROVIDER:-console}" @@ -73,20 +108,71 @@ API_PID="" WORKER_PID="" UI_PID="" -cleanup() { - set +e - if [[ -n "${UI_PID}" ]] && kill -0 "${UI_PID}" 2>/dev/null; then - kill "${UI_PID}" 2>/dev/null || true - wait "${UI_PID}" 2>/dev/null || true +SCRIPT_PGID="$(ps -o pgid= $$ 2>/dev/null | tr -d '[:space:]' || true)" + +start_bg() { + if command -v setsid >/dev/null 2>&1; then + setsid "$@" & + else + "$@" & fi - if [[ -n "${WORKER_PID}" ]] && kill -0 "${WORKER_PID}" 2>/dev/null; then - kill "${WORKER_PID}" 2>/dev/null || true - wait "${WORKER_PID}" 2>/dev/null || true +} + +kill_process_group() { + local pid="$1" + if [[ -z "${pid}" ]]; then + return 0 + fi + if ! 
kill -0 "${pid}" 2>/dev/null; then + return 0 fi - if [[ -n "${API_PID}" ]] && kill -0 "${API_PID}" 2>/dev/null; then - kill "${API_PID}" 2>/dev/null || true - wait "${API_PID}" 2>/dev/null || true + + local pgid + pgid="$(ps -o pgid= "${pid}" 2>/dev/null | tr -d '[:space:]' || true)" + if [[ -n "${pgid}" && -n "${SCRIPT_PGID}" && "${pgid}" == "${SCRIPT_PGID}" ]]; then + kill -TERM "${pid}" 2>/dev/null || true + elif [[ -n "${pgid}" ]]; then + kill -TERM -- "-${pgid}" 2>/dev/null || true + else + kill -TERM "${pid}" 2>/dev/null || true fi +} + +wait_gone() { + local pid="$1" + local timeout_seconds="${2:-5}" + local start + start="$(date +%s)" + + while kill -0 "${pid}" 2>/dev/null; do + local now + now="$(date +%s)" + if (( now - start > timeout_seconds )); then + return 1 + fi + sleep 1 + done + return 0 +} + +cleanup() { + set +e + kill_process_group "${UI_PID}" + kill_process_group "${WORKER_PID}" + kill_process_group "${API_PID}" + + for pid in "${UI_PID}" "${WORKER_PID}" "${API_PID}"; do + if [[ -z "${pid}" ]]; then + continue + fi + if wait_gone "${pid}" 5; then + wait "${pid}" 2>/dev/null || true + continue + fi + + kill -KILL "${pid}" 2>/dev/null || true + wait "${pid}" 2>/dev/null || true + done if [[ "${DOWN_AFTER}" == "1" ]]; then docker compose down >/dev/null 2>&1 || true @@ -177,10 +263,7 @@ if [[ "${SKIP_MIGRATIONS}" == "0" ]]; then fi echo "==> starting api" -( - cd backend - PORT="${API_PORT}" go run ./cmd/api -) & +start_bg bash -c "cd backend && PORT='${API_PORT}' exec go run ./cmd/api" API_PID="$!" wait_http_ok "api /healthz" "${API_BASE}/healthz" 60 @@ -189,10 +272,7 @@ wait_http_ok "api /api/v1/meta" "${API_BASE}/api/v1/meta" 60 if [[ "${SKIP_WORKER}" == "0" ]]; then echo "==> starting worker" - ( - cd backend - go run ./cmd/worker - ) & + start_bg bash -c "cd backend && exec go run ./cmd/worker" WORKER_PID="$!" 
echo "==> testing jobs (enqueue -> worker processes -> done)" From 3ad020eabc4ad125267f32f6c6c42940192f64ce Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 02:04:55 +0800 Subject: [PATCH 18/23] smoke: run compiled binaries for cleanup --- scripts/smoke-local.sh | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/scripts/smoke-local.sh b/scripts/smoke-local.sh index 0f73798..68fec8f 100644 --- a/scripts/smoke-local.sh +++ b/scripts/smoke-local.sh @@ -108,16 +108,6 @@ API_PID="" WORKER_PID="" UI_PID="" -SCRIPT_PGID="$(ps -o pgid= $$ 2>/dev/null | tr -d '[:space:]' || true)" - -start_bg() { - if command -v setsid >/dev/null 2>&1; then - setsid "$@" & - else - "$@" & - fi -} - kill_process_group() { local pid="$1" if [[ -z "${pid}" ]]; then @@ -127,15 +117,7 @@ kill_process_group() { return 0 fi - local pgid - pgid="$(ps -o pgid= "${pid}" 2>/dev/null | tr -d '[:space:]' || true)" - if [[ -n "${pgid}" && -n "${SCRIPT_PGID}" && "${pgid}" == "${SCRIPT_PGID}" ]]; then - kill -TERM "${pid}" 2>/dev/null || true - elif [[ -n "${pgid}" ]]; then - kill -TERM -- "-${pgid}" 2>/dev/null || true - else - kill -TERM "${pid}" 2>/dev/null || true - fi + kill -TERM "${pid}" 2>/dev/null || true } wait_gone() { @@ -263,7 +245,16 @@ if [[ "${SKIP_MIGRATIONS}" == "0" ]]; then fi echo "==> starting api" -start_bg bash -c "cd backend && PORT='${API_PORT}' exec go run ./cmd/api" +SMOKE_BIN_DIR="${SMOKE_BIN_DIR:-/tmp/saas-core-template-smoke-bin}" +mkdir -p "${SMOKE_BIN_DIR}" + +echo "==> building api + worker binaries" +(cd backend && go build -o "${SMOKE_BIN_DIR}/api" ./cmd/api) +if [[ "${SKIP_WORKER}" == "0" ]]; then + (cd backend && go build -o "${SMOKE_BIN_DIR}/worker" ./cmd/worker) +fi + +PORT="${API_PORT}" "${SMOKE_BIN_DIR}/api" & API_PID="$!" 
wait_http_ok "api /healthz" "${API_BASE}/healthz" 60 @@ -272,7 +263,7 @@ wait_http_ok "api /api/v1/meta" "${API_BASE}/api/v1/meta" 60 if [[ "${SKIP_WORKER}" == "0" ]]; then echo "==> starting worker" - start_bg bash -c "cd backend && exec go run ./cmd/worker" + "${SMOKE_BIN_DIR}/worker" & WORKER_PID="$!" echo "==> testing jobs (enqueue -> worker processes -> done)" From 552272159f675943b73f41b1095f3965e3512072 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 23:03:37 +0800 Subject: [PATCH 19/23] docs: add template gaps and upstreaming guide --- docs/README.md | 2 + .../template-gap-and-upstreaming.md | 111 ++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 docs/operations/template-gap-and-upstreaming.md diff --git a/docs/README.md b/docs/README.md index 85466f8..713c043 100644 --- a/docs/README.md +++ b/docs/README.md @@ -50,6 +50,8 @@ This directory contains implementation playbooks for contributors and AI agents. - Local console error capture and managed Sentry configuration. - [Support (Crisp)](operations/support.md) - Optional support widget integration and provider swaps. +- [Template Gaps and Upstreaming](operations/template-gap-and-upstreaming.md) + - Backlog for template completion and guidance for upstreaming from child projects. - [Provider Migration Playbook](operations/provider-migration-playbook.md) - Dual-run, just-in-time migration, and cutover strategy. - [Agent Workflow Runbook](operations/agent-workflow.md) diff --git a/docs/operations/template-gap-and-upstreaming.md b/docs/operations/template-gap-and-upstreaming.md new file mode 100644 index 0000000..75a1296 --- /dev/null +++ b/docs/operations/template-gap-and-upstreaming.md @@ -0,0 +1,111 @@ +# Template Gaps and Upstreaming Guide + +This document serves two purposes: + +1) Track what is still missing in `saas-core-template`. 
+2) Provide instructions for agents working in *child projects* (projects created from this template) on how to upstream improvements back into the template when appropriate. + +## Current status (what works today) + +- Local infra via Docker Compose (`postgres`, `redis`, `otel-collector`). +- Backend API + worker in Go; Postgres-backed jobs, email adapter, file uploads, audit logs. +- App-owned organizations with personal workspace + team orgs, invites, and RBAC (member/admin/owner). +- Frontend on Next.js (Vercel target) with shadcn/ui baseline; minimal org switcher + invite acceptance flow. +- Local smoke test script: `make smoke-local` (use `SMOKE_ARGS=--skip-ui` if your Node version can’t run Next.js). +- Migration runner: `make migrate-up` (tracks applied migrations in `schema_migrations`). + +## Remaining work (template backlog) + +### Local E2E (developer experience) + +- Full smoke test including UI (`make smoke-local` without `--skip-ui`) requires Node 20+ on the developer machine. +- Add a “prod smoke” runbook/script to verify Vercel + Render wiring and provider integrations end-to-end. + +### Security hardening + +- Replace permissive CORS (`*`) with an allowlist for production. +- Add rate limiting / abuse protections for public endpoints. +- Add request size limits consistently (some exist implicitly, but not centrally enforced). + +### RBAC and authorization coverage + +- Keep expanding RBAC coverage as new endpoints are added (deny-by-default, role-gated mutations, sensitive reads). +- Add dedicated org settings/member management pages in the UI with clear role gating. + +### Production migration automation + +- Decide where `backend/cmd/migrate up` runs in production (manual, one-off Render job, or deployment step) and document the exact procedure. 
+ +## When a child project should upstream a change + +Upstream changes when they are **template-shaped**: + +- Cross-cutting infrastructure (auth boundaries, tenancy, billing wiring, observability, migrations, background jobs). +- Generic product modules that most SaaS apps need (org management, audit logging, file uploads, email, analytics/error reporting/support widgets). +- Developer-experience improvements (smoke tests, scripts, docs, safer defaults). +- Provider portability improvements (adapters/interfaces, migration playbooks). + +Do **not** upstream: + +- Business logic, domain models, or product-specific UI flows. +- Customer-specific integrations, hardcoded pricing, or bespoke schemas. +- Anything requiring paid providers by default (managed providers must remain optional and swappable). + +## Upstreaming workflow (agent instructions for child projects) + +### 1) Triage: is this template-worthy? + +Use this quick filter: + +- Would I want this in my *next* SaaS experiment? +- Can it ship with “console/noop” defaults and work locally without paid accounts? +- Does it keep provider-specific types out of domain models? +- Does it preserve tenant scoping and deny-by-default authorization? + +If “yes” to all, it’s a good upstream candidate. + +### 2) Keep changes portable + +When implementing in the child project: + +- Put provider-specific code behind interfaces/adapters. +- Add feature toggles / env-driven provider selection (for example `*_PROVIDER=console|managed|none`). +- Keep migrations additive and safe; avoid destructive schema changes unless necessary. +- Avoid coupling to the child project name (do not rename module paths back in the template). + +### 3) Package the upstream change + +Before upstreaming: + +- Extract the generic portion into a clean commit (or a short sequence of commits) with no business logic. 
+- Add/update docs in the template: + - Architecture changes → `docs/architecture/*` + - Ops/runbooks → `docs/operations/*` + - Any new env vars → `.env.example`, `backend/.env.example`, `frontend/.env.example` +- Add/extend smoke coverage where possible: + - local: `scripts/smoke-local.sh` + - migrations: `backend/cmd/migrate` +- Run validations (or ensure CI will cover them). + +### 4) Upstream mechanics + +Recommended approach: + +1. In the child repo, create a branch containing only template-worthy commits. +2. Apply the same commits to the template repo: + - either by `git cherry-pick` onto a `feature/*` branch in the template repo, + - or by opening a PR from the child repo’s branch (if you maintain a remote that can target the template). +3. In the template repo: + - ensure no secrets are added + - ensure docs are updated + - ensure local smoke still passes (use `SMOKE_ARGS=--skip-ui` if needed) + +## Notes for template initialization + +Each child project should run the initialization protocol once: + +- `./scripts/init-template.sh ""` +- Verify rename integrity: `rg "saas-core-template|saas_core_template"` + +This template is intentionally “copy forward”; child projects are not guaranteed downstream updates. The upstreaming process above is the mechanism for voluntarily contributing generic improvements back to the template for future projects. 
+ From acf2338bf150f4298b4ab340473cd41c30899c53 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 23:12:43 +0800 Subject: [PATCH 20/23] frontend: make typecheck resilient to .next --- frontend/package.json | 2 +- frontend/tsconfig.typecheck.json | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 frontend/tsconfig.typecheck.json diff --git a/frontend/package.json b/frontend/package.json index 01c6de0..0037a04 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -7,7 +7,7 @@ "build": "next build", "start": "sh -c 'next start -p ${PORT:-3000}'", "lint": "next lint", - "typecheck": "tsc --noEmit" + "typecheck": "tsc --noEmit -p tsconfig.typecheck.json" }, "dependencies": { "@clerk/nextjs": "^6.37.5", diff --git a/frontend/tsconfig.typecheck.json b/frontend/tsconfig.typecheck.json new file mode 100644 index 0000000..37d627f --- /dev/null +++ b/frontend/tsconfig.typecheck.json @@ -0,0 +1,6 @@ +{ + "extends": "./tsconfig.json", + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"], + "exclude": ["node_modules", ".next"] +} + From 36fdd2eb28b426762e74c665c1c2d50cc1b7b42f Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 23:24:35 +0800 Subject: [PATCH 21/23] frontend: fix lint on Node 19 --- frontend/eslint.config.mjs | 19 +++++++++++++++++++ frontend/lib/i18n/translate.ts | 10 ++++++++-- frontend/lib/integrations/analytics.ts | 2 -- frontend/lib/integrations/error-reporting.ts | 12 +++++++----- frontend/lib/integrations/support.ts | 3 +-- frontend/package.json | 2 +- frontend/tailwind.config.ts | 4 ++-- 7 files changed, 38 insertions(+), 14 deletions(-) create mode 100644 frontend/eslint.config.mjs diff --git a/frontend/eslint.config.mjs b/frontend/eslint.config.mjs new file mode 100644 index 0000000..1b5d0ec --- /dev/null +++ b/frontend/eslint.config.mjs @@ -0,0 +1,19 @@ +import path from "node:path"; 
+import { fileURLToPath } from "node:url"; +import { FlatCompat } from "@eslint/eslintrc"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +const compat = new FlatCompat({ + baseDirectory: __dirname +}); + +const config = [ + { + ignores: [".next/**", "node_modules/**", "next-env.d.ts"] + }, + ...compat.extends("next/core-web-vitals", "next/typescript") +]; + +export default config; diff --git a/frontend/lib/i18n/translate.ts b/frontend/lib/i18n/translate.ts index 07ff101..6261e58 100644 --- a/frontend/lib/i18n/translate.ts +++ b/frontend/lib/i18n/translate.ts @@ -2,9 +2,15 @@ import type { Messages } from "./messages"; export function t(messages: Messages, key: string): string { const parts = key.split("."); - let current: any = messages; + let current: unknown = messages; for (const part of parts) { - current = current?.[part]; + if (!current || typeof current !== "object") { + current = undefined; + break; + } + + const record = current as Record; + current = record[part]; } if (typeof current === "string") { diff --git a/frontend/lib/integrations/analytics.ts b/frontend/lib/integrations/analytics.ts index 8ee5a84..cde02fb 100644 --- a/frontend/lib/integrations/analytics.ts +++ b/frontend/lib/integrations/analytics.ts @@ -9,7 +9,6 @@ export type AnalyticsClient = { function log(method: string, event: string, props?: Record) { const payload = props ? 
JSON.stringify(props) : ""; - // eslint-disable-next-line no-console console.info(`[analytics:${method}] ${event}`, payload); } @@ -60,4 +59,3 @@ declare global { }; } } - diff --git a/frontend/lib/integrations/error-reporting.ts b/frontend/lib/integrations/error-reporting.ts index b0eef88..637225d 100644 --- a/frontend/lib/integrations/error-reporting.ts +++ b/frontend/lib/integrations/error-reporting.ts @@ -19,11 +19,9 @@ export function createErrorReportingClient(provider: ErrorReportingProvider): Er return { init: () => {}, captureException: (error, context) => { - // eslint-disable-next-line no-console console.error("[error-reporting]", error, context); }, setUser: (user) => { - // eslint-disable-next-line no-console console.info("[error-reporting:user]", user); } }; @@ -46,7 +44,7 @@ export function createErrorReportingClient(provider: ErrorReportingProvider): Er }); }, captureException: (error, context) => { - window.Sentry?.withScope?.((scope: any) => { + window.Sentry?.withScope?.((scope) => { if (context) { scope.setContext?.("extra", context); } @@ -81,7 +79,11 @@ function maybeLoadSentryBrowser() { declare global { interface Window { - Sentry?: any; + Sentry?: { + init?: (config: { dsn: string; environment?: string; release?: string }) => void; + withScope?: (fn: (scope: { setContext?: (name: string, data: Record) => void }) => void) => void; + captureException?: (error: unknown) => void; + setUser?: (user: { id: string } | null) => void; + }; } } - diff --git a/frontend/lib/integrations/support.ts b/frontend/lib/integrations/support.ts index cfe620d..4560bef 100644 --- a/frontend/lib/integrations/support.ts +++ b/frontend/lib/integrations/support.ts @@ -36,8 +36,7 @@ export function createSupportClient(provider: SupportProvider): SupportClient { declare global { interface Window { - $crisp?: any[]; + $crisp?: Array; CRISP_WEBSITE_ID?: string; } } - diff --git a/frontend/package.json b/frontend/package.json index 0037a04..5b5050f 100644 --- 
a/frontend/package.json +++ b/frontend/package.json @@ -6,7 +6,7 @@ "dev": "next dev -p 3000", "build": "next build", "start": "sh -c 'next start -p ${PORT:-3000}'", - "lint": "next lint", + "lint": "eslint .", "typecheck": "tsc --noEmit -p tsconfig.typecheck.json" }, "dependencies": { diff --git a/frontend/tailwind.config.ts b/frontend/tailwind.config.ts index afd1352..e0da013 100644 --- a/frontend/tailwind.config.ts +++ b/frontend/tailwind.config.ts @@ -1,4 +1,5 @@ import type { Config } from "tailwindcss"; +import tailwindAnimate from "tailwindcss-animate"; const config: Config = { darkMode: ["class"], @@ -50,8 +51,7 @@ const config: Config = { } } }, - plugins: [require("tailwindcss-animate")] + plugins: [tailwindAnimate] }; export default config; - From 89442fff93d6aa2734cc1a03006a49a6912cefd0 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Mon, 23 Feb 2026 23:52:50 +0800 Subject: [PATCH 22/23] chore: add .nvmrc for Node 20 --- .nvmrc | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .nvmrc diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 0000000..35f4978 --- /dev/null +++ b/.nvmrc @@ -0,0 +1,2 @@ +20 + From e5c053c244b6aeb8965e12c86c2fdde5b6231b10 Mon Sep 17 00:00:00 2001 From: Aayush Gautam <7802627+insideaayush@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:59:43 +0800 Subject: [PATCH 23/23] frontend: fix i18n build and lint script --- .gitignore | 1 + frontend/app/language-switcher.tsx | 2 +- frontend/eslint.config.mjs | 19 ------------------- frontend/lib/i18n/locale-cookie.ts | 2 ++ frontend/lib/i18n/locale.ts | 6 ++---- frontend/package.json | 2 +- 6 files changed, 7 insertions(+), 25 deletions(-) delete mode 100644 frontend/eslint.config.mjs create mode 100644 frontend/lib/i18n/locale-cookie.ts diff --git a/.gitignore b/.gitignore index 5558810..4a4c1a1 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ frontend/.next/ frontend/out/ frontend/node_modules/ *.tsbuildinfo 
+frontend/pnpm-lock.yaml # Backend backend/bin/ diff --git a/frontend/app/language-switcher.tsx b/frontend/app/language-switcher.tsx index 1e31334..cef554d 100644 --- a/frontend/app/language-switcher.tsx +++ b/frontend/app/language-switcher.tsx @@ -3,7 +3,7 @@ import { useRouter } from "next/navigation"; import { useMemo } from "react"; import { LOCALES, type Locale } from "@/lib/i18n/messages"; -import { localeCookieName } from "@/lib/i18n/locale"; +import { localeCookieName } from "@/lib/i18n/locale-cookie"; import { Button } from "@/components/ui/button"; export function LanguageSwitcher({ currentLocale }: { currentLocale: Locale }) { diff --git a/frontend/eslint.config.mjs b/frontend/eslint.config.mjs deleted file mode 100644 index 1b5d0ec..0000000 --- a/frontend/eslint.config.mjs +++ /dev/null @@ -1,19 +0,0 @@ -import path from "node:path"; -import { fileURLToPath } from "node:url"; -import { FlatCompat } from "@eslint/eslintrc"; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = path.dirname(__filename); - -const compat = new FlatCompat({ - baseDirectory: __dirname -}); - -const config = [ - { - ignores: [".next/**", "node_modules/**", "next-env.d.ts"] - }, - ...compat.extends("next/core-web-vitals", "next/typescript") -]; - -export default config; diff --git a/frontend/lib/i18n/locale-cookie.ts b/frontend/lib/i18n/locale-cookie.ts new file mode 100644 index 0000000..be4d414 --- /dev/null +++ b/frontend/lib/i18n/locale-cookie.ts @@ -0,0 +1,2 @@ +export const localeCookieName = "locale"; + diff --git a/frontend/lib/i18n/locale.ts b/frontend/lib/i18n/locale.ts index 43310d0..1f55537 100644 --- a/frontend/lib/i18n/locale.ts +++ b/frontend/lib/i18n/locale.ts @@ -1,15 +1,13 @@ import { cookies } from "next/headers"; import { isLocale, type Locale } from "./messages"; -const COOKIE_NAME = "locale"; +import { localeCookieName } from "./locale-cookie"; export async function getServerLocale(): Promise { const store = await cookies(); - const 
value = store.get(COOKIE_NAME)?.value; + const value = store.get(localeCookieName)?.value; if (isLocale(value)) { return value; } return "en"; } - -export const localeCookieName = COOKIE_NAME; diff --git a/frontend/package.json b/frontend/package.json index 5b5050f..0037a04 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -6,7 +6,7 @@ "dev": "next dev -p 3000", "build": "next build", "start": "sh -c 'next start -p ${PORT:-3000}'", - "lint": "eslint .", + "lint": "next lint", "typecheck": "tsc --noEmit -p tsconfig.typecheck.json" }, "dependencies": {