From 316f5af3d6e64c2fa29ab896b88ee3f32f056eb4 Mon Sep 17 00:00:00 2001 From: ngovinh2k2 Date: Mon, 12 Jan 2026 16:21:56 +0700 Subject: [PATCH 01/10] feat(core): initialize codebase and implement core use cases Co-authored-by: Vinh Ngo Co-authored-by: Dat Le Co-authored-by: Khoa Tran Co-authored-by: Luu Le Co-authored-by: Quynh Nguyen --- .env.example | 21 + .github/pull_request_template.md | 19 + .github/workflows/ci-dev.yml | 48 ++ .gitignore | 76 ++ .nosec.json | 7 + Dockerfile | 50 ++ README.md | 217 +++++ cmd/telemetry/main.go | 100 +++ cmd/telemetry/serve.go | 187 +++++ configs/alerts_processors.yaml | 13 + configs/config.yaml | 35 + go.mod | 53 ++ go.sum | 219 +++++ internal/alerts/registry/generic_numeric.go | 65 ++ internal/alerts/registry/loader.go | 81 ++ internal/alerts/registry/registry.go | 72 ++ internal/amqp/multi-tenant/consumer.go | 749 ++++++++++++++++++ internal/api/alerts/handler.go | 162 ++++ internal/api/alerts/routes.go | 13 + internal/api/common/request.go | 41 + internal/api/data/handler.go | 63 ++ internal/api/data/models/models.go | 8 + internal/api/data/router.go | 12 + internal/api/entities/handler.go | 89 +++ internal/api/entities/router.go | 12 + internal/api/location/handler.go | 90 +++ internal/api/location/models/request.go | 36 + internal/api/location/models/response.go | 24 + internal/api/location/router.go | 12 + internal/api/router.go | 22 + internal/api/widget/device_types.go | 96 +++ internal/api/widget/handler.go | 67 ++ internal/api/widget/models.go | 83 ++ internal/api/widget/router.go | 12 + internal/config/config.go | 130 +++ internal/health/handler.go | 96 +++ internal/health/models/response.go | 55 ++ internal/health/router.go | 19 + internal/models/device.go | 43 + internal/models/org_event.go | 71 ++ internal/models/telemetry.go | 38 + internal/services/processor.go | 206 +++++ internal/timescaledb/alerts.go | 229 ++++++ internal/timescaledb/attributes.go | 330 ++++++++ internal/timescaledb/client.go | 60 ++ 
internal/timescaledb/context.go | 42 + internal/timescaledb/device_properties.go | 91 +++ internal/timescaledb/entities.go | 182 +++++ internal/timescaledb/errors.go | 26 + internal/timescaledb/schema.go | 75 ++ internal/timescaledb/telemetry.go | 209 +++++ internal/timescaledb/tx.go | 38 + internal/timescaledb/widget_data.go | 404 ++++++++++ pkgs/db/dberrors/bob_errors.bob.go | 32 + pkgs/db/dberrors/bob_main.bob_test.go | 9 + pkgs/db/dberrors/device_locations.bob.go | 17 + pkgs/db/dberrors/schema_migrations.bob.go | 17 + pkgs/db/dbinfo/bob_types.bob.go | 83 ++ pkgs/db/dbinfo/device_locations.bob.go | 193 +++++ pkgs/db/dbinfo/schema_migrations.bob.go | 92 +++ pkgs/db/factory/bobfactory_context.bob.go | 34 + pkgs/db/factory/bobfactory_main.bob.go | 89 +++ pkgs/db/factory/bobfactory_main.bob_test.go | 57 ++ pkgs/db/factory/bobfactory_random.bob.go | 71 ++ pkgs/db/factory/bobfactory_random.bob_test.go | 46 ++ pkgs/db/factory/device_locations.bob.go | 476 +++++++++++ pkgs/db/factory/schema_migrations.bob.go | 255 ++++++ pkgs/db/migrate.go | 28 + ...20251114041904_create_device_locations.sql | 33 + .../20251126082459_add-space-slug-column.sql | 5 + .../20251208120000_create_entities_schema.sql | 64 ++ .../20251209120000_drop_device_locations.sql | 15 + pkgs/db/models/bob_joins.bob.go | 70 ++ pkgs/db/models/bob_loaders.bob.go | 55 ++ pkgs/db/models/bob_types.bob_test.go | 15 + pkgs/db/models/bob_where.bob.go | 30 + pkgs/db/models/device_locations.bob.go | 484 +++++++++++ pkgs/db/models/schema_migrations.bob.go | 348 ++++++++ 78 files changed, 7816 insertions(+) create mode 100644 .env.example create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ci-dev.yml create mode 100644 .gitignore create mode 100644 .nosec.json create mode 100644 Dockerfile create mode 100644 README.md create mode 100644 cmd/telemetry/main.go create mode 100644 cmd/telemetry/serve.go create mode 100644 configs/alerts_processors.yaml create mode 100644 
configs/config.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/alerts/registry/generic_numeric.go create mode 100644 internal/alerts/registry/loader.go create mode 100644 internal/alerts/registry/registry.go create mode 100644 internal/amqp/multi-tenant/consumer.go create mode 100644 internal/api/alerts/handler.go create mode 100644 internal/api/alerts/routes.go create mode 100644 internal/api/common/request.go create mode 100644 internal/api/data/handler.go create mode 100644 internal/api/data/models/models.go create mode 100644 internal/api/data/router.go create mode 100644 internal/api/entities/handler.go create mode 100644 internal/api/entities/router.go create mode 100644 internal/api/location/handler.go create mode 100644 internal/api/location/models/request.go create mode 100644 internal/api/location/models/response.go create mode 100644 internal/api/location/router.go create mode 100644 internal/api/router.go create mode 100644 internal/api/widget/device_types.go create mode 100644 internal/api/widget/handler.go create mode 100644 internal/api/widget/models.go create mode 100644 internal/api/widget/router.go create mode 100644 internal/config/config.go create mode 100644 internal/health/handler.go create mode 100644 internal/health/models/response.go create mode 100644 internal/health/router.go create mode 100644 internal/models/device.go create mode 100644 internal/models/org_event.go create mode 100644 internal/models/telemetry.go create mode 100644 internal/services/processor.go create mode 100644 internal/timescaledb/alerts.go create mode 100644 internal/timescaledb/attributes.go create mode 100644 internal/timescaledb/client.go create mode 100644 internal/timescaledb/context.go create mode 100644 internal/timescaledb/device_properties.go create mode 100644 internal/timescaledb/entities.go create mode 100644 internal/timescaledb/errors.go create mode 100644 internal/timescaledb/schema.go create mode 100644 
internal/timescaledb/telemetry.go create mode 100644 internal/timescaledb/tx.go create mode 100644 internal/timescaledb/widget_data.go create mode 100644 pkgs/db/dberrors/bob_errors.bob.go create mode 100644 pkgs/db/dberrors/bob_main.bob_test.go create mode 100644 pkgs/db/dberrors/device_locations.bob.go create mode 100644 pkgs/db/dberrors/schema_migrations.bob.go create mode 100644 pkgs/db/dbinfo/bob_types.bob.go create mode 100644 pkgs/db/dbinfo/device_locations.bob.go create mode 100644 pkgs/db/dbinfo/schema_migrations.bob.go create mode 100644 pkgs/db/factory/bobfactory_context.bob.go create mode 100644 pkgs/db/factory/bobfactory_main.bob.go create mode 100644 pkgs/db/factory/bobfactory_main.bob_test.go create mode 100644 pkgs/db/factory/bobfactory_random.bob.go create mode 100644 pkgs/db/factory/bobfactory_random.bob_test.go create mode 100644 pkgs/db/factory/device_locations.bob.go create mode 100644 pkgs/db/factory/schema_migrations.bob.go create mode 100644 pkgs/db/migrate.go create mode 100644 pkgs/db/migrations/20251114041904_create_device_locations.sql create mode 100644 pkgs/db/migrations/20251126082459_add-space-slug-column.sql create mode 100644 pkgs/db/migrations/20251208120000_create_entities_schema.sql create mode 100644 pkgs/db/migrations/20251209120000_drop_device_locations.sql create mode 100644 pkgs/db/models/bob_joins.bob.go create mode 100644 pkgs/db/models/bob_loaders.bob.go create mode 100644 pkgs/db/models/bob_types.bob_test.go create mode 100644 pkgs/db/models/bob_where.bob.go create mode 100644 pkgs/db/models/device_locations.bob.go create mode 100644 pkgs/db/models/schema_migrations.bob.go diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..c7a7e4c --- /dev/null +++ b/.env.example @@ -0,0 +1,21 @@ +# Server Configuration +SERVER_LOG_LEVEL="__SERVER_LOG_LEVEL__" +SERVER_API_PORT="__SERVER_API_PORT__" + +# TimescaleDB Configuration +DB_NAME="__DB_NAME__" +DB_USERNAME="__DB_USERNAME__" 
+DB_PASSWORD="__DB_PASSWORD__" +DB_HOST="__DB_HOST__" +DB_PORT="__DB_PORT__" +DB_BATCH_SIZE="__DB_BATCH_SIZE__" +DB_FLUSH_INTERVAL="__DB_FLUSH_INTERVAL__" +DB_MAX_CONNECTIONS="__DB_MAX_CONNECTIONS__" +DB_MAX_IDLE_CONNS="__DB_MAX_IDLE_CONNS__" + +# RabbitMQ Configuration (Multi-tenant mode) +AMQP_BROKER_URL="__AMQP_BROKER_URL__" +AMQP_CONSUMER_TAG="__AMQP_CONSUMER_TAG__" +AMQP_PREFETCH_COUNT="__AMQP_PREFETCH_COUNT__" +AMQP_ALLOWED_VHOSTS="__AMQP_ALLOWED_VHOSTS__" +AMQP_RECONNECT_DELAY="__AMQP_RECONNECT_DELAY__" \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..80b8493 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,19 @@ +## What? + +## Why? + +## How? + +## Testing? +- [ ] Functional Testing +- [ ] Security +- [ ] Performance +- [ ] Error Handling +- [ ] Code Quality +- [ ] Documentation +- [ ] Database +- [ ] Deployment +- [ ] Final Review + +## Anything Else? + diff --git a/.github/workflows/ci-dev.yml b/.github/workflows/ci-dev.yml new file mode 100644 index 0000000..31b81da --- /dev/null +++ b/.github/workflows/ci-dev.yml @@ -0,0 +1,48 @@ +name: Telemetry Service CI + +on: + push: + branches: ["dev", "main"] + pull_request: + branches: ["dev", "main"] + +jobs: + lint: + name: Lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v8 + with: + version: v2.4.0 + args: --timeout=5m ./... + + - name: Run gosec security scanner + uses: securego/gosec@master + with: + args: -conf .nosec.json ./... 
+ + build: + name: Build + runs-on: ubuntu-latest + needs: [lint] + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + + - name: Build binary + run: go build -v ./cmd/telemetry + + - name: Build Docker image + run: docker build -t telemetry-service . diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0e866d2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,76 @@ +# Environment variables +.env +.env.local +.env.*.local + +# Build artifacts +bin/ +dist/ +*.exe +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out +coverage.txt +coverage.html + +# Security scan reports +gosec-report.json +gosec-report.txt + +# Dependency directories +vendor/ + +# IDE files +.idea/ +.vscode/ +*.swp +*.swo +*~ +.DS_Store + +# Logs +*.log +logs/ + +# Temporary files +*.tmp +tmp/ + +# Debug files +debug +*.pprof + +# Air hot reload +tmp/ + +# Go workspace file +go.work + +# Local database files +*.sqlite +*.db + +# Config override files (but not examples) +config.local.yaml +config.override.yaml + +# Kubernetes secrets (if any) +*.secret.yaml +*-secret.yaml + +# Terraform (if used) +*.tfstate +*.tfstate.backup +.terraform/ + +# Docker override files +docker-compose.override.yml +docker-compose.local.yml + +.env diff --git a/.nosec.json b/.nosec.json new file mode 100644 index 0000000..b27c7c9 --- /dev/null +++ b/.nosec.json @@ -0,0 +1,7 @@ +{ + "global": { + "nosec": "nosec", + "exclude": "G109,G115", + "exclude-dir": "pkgs/db" + } +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d5e2ab9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,50 @@ +# Build stage +FROM golang:1.24-alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git make + +# Set working directory +WORKDIR /build + +# Copy go mod files +COPY go.mod go.sum ./ + +# Download dependencies +RUN go mod download + +# Copy source code +COPY . . 
+ +# Build the application +RUN go build -v -o telemetry ./cmd/telemetry + +# Runtime stage +FROM alpine:latest + +# Install runtime dependencies +RUN apk --no-cache add ca-certificates tzdata + +# Create non-root user +RUN addgroup -g 1000 -S telemetry && \ + adduser -u 1000 -S telemetry -G telemetry + +# Set working directory +WORKDIR /app + +# Copy binary from builder +COPY --from=builder /build/telemetry . +COPY --from=builder /build/configs ./configs +COPY --from=builder /build/pkgs/db/migrations ./pkgs/db/migrations + +# Change ownership +RUN chown -R telemetry:telemetry /app + +# Switch to non-root user +USER telemetry + +EXPOSE 8080 + +# Run the application +ENTRYPOINT ["./telemetry"] +CMD ["serve"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..788667c --- /dev/null +++ b/README.md @@ -0,0 +1,217 @@ +# Telemetry Service + +A robust Go-based telemetry aggregation and alert management service for the SpaceDF IoT platform. Processes device telemetry data, stores metrics in TimescaleDB, and manages real-time alerts. 
+ +## Overview + +The Telemetry Service is a high-performance message processor that: + +- **Aggregates telemetry data** from IoT devices via AMQP +- **Stores time-series data** in TimescaleDB for historical analysis +- **Manages alert rules** and generates alerts based on configurable thresholds +- **Processes messages in batches** for optimal database performance +- **Provides REST API** for querying metrics and alerts + +## Architecture + +``` +RabbitMQ (AMQP) + ↓ +┌──────────────────────────────────┐ +│ Telemetry Service │ +│ ┌─────────────────────────────┐ │ +│ │ AMQP Consumer (Multi-tenant)│ │ +│ └──────────────┬──────────────┘ │ +│ ↓ │ +│ ┌──────────────────────────────┐│ +│ │ Message Processing Engine ││ +│ │ - Data Transformation ││ +│ │ - Alert Evaluation ││ +│ │ - Batch Aggregation ││ +│ └──────────┬───────────────────┘│ +│ ↓ │ +│ ┌──────────────────────────────┐│ +│ │ TimescaleDB (PostgreSQL) ││ +│ │ - Device Metrics ││ +│ │ - Alert History ││ +│ │ - Rule Configurations ││ +│ └──────────────────────────────┘│ +│ ↕ │ +│ ┌──────────────────────────────┐│ +│ │ REST API ││ +│ │ - Query Metrics ││ +│ │ - Manage Alerts ││ +│ │ - Health Checks ││ +│ └──────────────────────────────┘│ +└──────────────────────────────────┘ +``` + +## Key Features + +### 📊 Data Processing +- **Multi-tenant support** - Isolated data per organization/tenant +- **Batch processing** - Efficient database writes with configurable flush intervals +- **Device location tracking** - Processes location coordinates with trilateration +- **Message enrichment** - Adds metadata and context to raw messages + +### 🗄️ Data Storage +- **TimescaleDB** - Time-series optimized PostgreSQL extension +- **Hypertables** - Automatic partitioning for scalability +- **Retention policies** - Automatic old data cleanup +- **Indexed queries** - Fast metric retrieval + +### 🔌 Integration +- **AMQP/RabbitMQ** - Message consumer with auto-reconnect +- **Multiple vhosts** - Support for RabbitMQ virtual hosts +- 
**REST API** - Query and manage telemetry data +- **Health checks** - Liveness and readiness endpoints + +## Prerequisites + +- **Go 1.24+** +- **TimescaleDB 2.10+** (PostgreSQL with TimescaleDB extension) +- **RabbitMQ 3.12+** +- **Docker & Docker Compose** (for containerized deployment) + +## Installation + +### Clone and Setup + +```bash +cd telemetry-service + +# Download dependencies +go mod download +go mod tidy + +# Copy environment template +cp .env.example .env + +# Edit configuration +nano .env +``` + +### Configuration +Create a `.env` file with the following example settings: + +```bash +# Server +SERVER_LOG_LEVEL=info +SERVER_API_PORT=8080 + +# TimescaleDB Configuration +DB_NAME="spacedf_telemetry" +DB_USERNAME="postgres" +DB_PASSWORD="postgres" +DB_HOST="localhost" +DB_PORT="5437" +DB_BATCH_SIZE=1000 +DB_FLUSH_INTERVAL=1s +DB_MAX_CONNECTIONS=25 +DB_MAX_IDLE_CONNS=5 + +# RabbitMQ Configuration (Multi-tenant mode) +AMQP_BROKER_URL=amqp://admin:password@rabbitmq:5672/ +AMQP_CONSUMER_TAG=telemetry-service +AMQP_PREFETCH_COUNT=100 +AMQP_ALLOWED_VHOSTS= +AMQP_RECONNECT_DELAY=5s +``` + +### Database Setup + +Initialize TimescaleDB: + +```bash +# Using dbmate migration tool +dbmate up + +# Or manually create the database +createdb telemetry_db +psql telemetry_db -c "CREATE EXTENSION IF NOT EXISTS timescaledb;" +``` + +## Usage + +### Run the Service + +```bash +# Start the telemetry service +go run ./cmd/telemetry serve + +# Or build and run +make build +./bin/telemetry serve +``` + +### Docker + +```bash +# Build image +docker build -t telemetry-service:latest . + +# Run container +docker run -p 8080:8080 \ + --env-file .env \ + -e AMQP_BROKER_URL=amqp://rabbitmq:5672/ \ + -e DB_HOST=timescaledb \ + telemetry-service:latest + +# Using Docker Compose +docker-compose up -d telemetry-service +``` +## Development + +### Running Tests + +```bash +# Run all tests +go test -v ./... + +# Run specific package tests +go test -v ./internal/services/... 
+ +# With coverage +go test -cover ./... +``` + +### Code Quality + +```bash +# Format code +gofmt -w . + +# Install tools +go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest +go install github.com/securego/gosec/v2/cmd/gosec@latest + +# Lint code +golangci-lint run ./... + +# Security scan +gosec ./... +``` + +### Database Migrations + +The service uses `dbmate` for database migrations: + +```bash +# Create new migration +dbmate new create_metrics_table + +# Apply migrations +dbmate up + +# Rollback +dbmate down +``` +## Contributing + +1. Fork the repository +2. Create a feature branch +3. Make your changes +4. Add tests +5. Submit a pull request + +[![SpaceDF - A project from Digital Fortress](https://df.technology/images/SpaceDF.png)](https://df.technology/) \ No newline at end of file diff --git a/cmd/telemetry/main.go b/cmd/telemetry/main.go new file mode 100644 index 0000000..75b9704 --- /dev/null +++ b/cmd/telemetry/main.go @@ -0,0 +1,100 @@ +package main + +import ( + "fmt" + "os" + + "github.com/Space-DF/telemetry-service/internal/config" + "github.com/urfave/cli/v2" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var ( + appConfig *config.Config +) + +func init() { + cfg, err := config.LoadConfig() + if err != nil { + fmt.Println("Failed to load configuration:", err) + panic(err) + } + appConfig = cfg +} + +func main() { + app := &cli.App{ + Name: "telemetry-service", + Usage: "Telemetry Service for SpaceDF IoT Platform", + Version: "1.0.0", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "log-level", + Aliases: []string{"l"}, + Value: "info", + Usage: "Log level (debug, info, warn, error)", + EnvVars: []string{"LOG_LEVEL"}, + }, + }, + Action: func(ctx *cli.Context) error { + // Default action is to show help + return cli.ShowAppHelp(ctx) + }, + Commands: []*cli.Command{ + { + Name: "serve", + Aliases: []string{"s"}, + Usage: "Start the telemetry service", + Action: func(ctx *cli.Context) error { + logLevel := 
ctx.String("log-level") + if logLevel == "" { + logLevel = appConfig.Server.LogLevel + } + + logger, err := initLogger(logLevel) + if err != nil { + return fmt.Errorf("failed to initialize logger: %w", err) + } + + return cmdServe(ctx, logger) + }, + }, + }, + } + + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func initLogger(level string) (*zap.Logger, error) { + var zapLevel zapcore.Level + switch level { + case "debug": + zapLevel = zapcore.DebugLevel + case "info": + zapLevel = zapcore.InfoLevel + case "warn": + zapLevel = zapcore.WarnLevel + case "error": + zapLevel = zapcore.ErrorLevel + default: + zapLevel = zapcore.InfoLevel + } + + config := zap.Config{ + Level: zap.NewAtomicLevelAt(zapLevel), + Development: zapLevel == zapcore.DebugLevel, + Encoding: "json", + EncoderConfig: zap.NewProductionEncoderConfig(), + OutputPaths: []string{"stdout"}, + ErrorOutputPaths: []string{"stderr"}, + } + + config.EncoderConfig.TimeKey = "timestamp" + config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + return config.Build() +} diff --git a/cmd/telemetry/serve.go b/cmd/telemetry/serve.go new file mode 100644 index 0000000..ae17b7d --- /dev/null +++ b/cmd/telemetry/serve.go @@ -0,0 +1,187 @@ +package main + +import ( + "context" + "fmt" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + "time" + + alertregistry "github.com/Space-DF/telemetry-service/internal/alerts/registry" + amqp "github.com/Space-DF/telemetry-service/internal/amqp/multi-tenant" + "github.com/Space-DF/telemetry-service/internal/api" + "github.com/Space-DF/telemetry-service/internal/health" + "github.com/Space-DF/telemetry-service/internal/services" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/Space-DF/telemetry-service/pkgs/db" + "github.com/labstack/echo/v4" + "github.com/labstack/echo/v4/middleware" + "github.com/urfave/cli/v2" + "go.uber.org/zap" +) + +const ShutdownTimeout = 30 * 
time.Second + +func cmdServe(ctx *cli.Context, logger *zap.Logger) error { + defer func() { + if err := logger.Sync(); err != nil { + logger.Debug("Error syncing logger", zap.Error(err)) + } + }() + + logger.Info("Starting Telemetry Service", + zap.String("version", "1.0.0"), + zap.String("mode", "multi-tenant"), + zap.Any("config", appConfig), + ) + + // Load alert processors from config (if provided) + loadAlertProcessors(logger, appConfig.Server.AlertsProcessorsCfg) + + // Run database migrations + logger.Info("Running database migrations...") + dsn := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=disable", + appConfig.Db.Username, appConfig.Db.Password, appConfig.Db.Host, appConfig.Db.Port, appConfig.Db.Name) + dbURL, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("failed to parse database DSN: %w", err) + } + + migrationPath := "pkgs/db/migrations" + if err := db.Migrate(dbURL, migrationPath); err != nil { + return fmt.Errorf("failed to run migrations: %w", err) + } + logger.Info("Database migrations completed successfully") + + // Initialize Psql client + tsClient, err := timescaledb.NewClient( + dsn, + appConfig.Db.BatchSize, + appConfig.Db.FlushInterval, + logger, + ) + if err != nil { + return fmt.Errorf("failed to initialize Psql client: %w", err) + } + defer func() { + if err := tsClient.Close(); err != nil { + logger.Error("Failed to close Psql client", zap.Error(err)) + } + }() + + // Initialize location processor + processor := services.NewLocationProcessor(tsClient, logger) + + // Initialize multi-tenant AMQP consumer with schema initializer + consumer := amqp.NewMultiTenantConsumer(appConfig.AMQP, appConfig.OrgEvents, processor, tsClient, logger) + + // Connect to RabbitMQ + if err := consumer.Connect(); err != nil { + return fmt.Errorf("failed to connect to AMQP: %w", err) + } + + // Initialize Echo + e := echo.New() + e.HideBanner = true + e.HidePort = true + + // Middleware + e.Use(middleware.Logger()) + e.Use(middleware.Recover()) 
+ e.Use(middleware.CORS()) + + group := e.Group("/api/telemetry") + api.Setup(appConfig, group, logger, tsClient) + health.Setup(group, consumer, tsClient, logger) + + // Create context for graceful shutdown + srvCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start Echo server + go func() { + logger.Info("Starting API server", zap.Int("port", appConfig.Server.APIPort)) + addr := fmt.Sprintf(":%d", appConfig.Server.APIPort) + if err := e.Start(addr); err != nil { + logger.Error("API server error", zap.Error(err)) + } + }() + + // Start AMQP consumer + go func() { + logger.Info("Starting AMQP consumer") + if err := consumer.Start(srvCtx); err != nil { + logger.Error("AMQP consumer error", zap.Error(err)) + cancel() + } + }() + + // Setup reload signal for alert processors + reloadChan := make(chan os.Signal, 1) + signal.Notify(reloadChan, syscall.SIGHUP) + go func() { + for range reloadChan { + loadAlertProcessors(logger, appConfig.Server.AlertsProcessorsCfg) + } + }() + + // Wait for interrupt signal + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + <-sigChan + + logger.Info("Shutting down service...") + + // Cancel context to stop all components + cancel() + + // Give components time to cleanup + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), ShutdownTimeout) + defer shutdownCancel() + + // Stop Echo server + if err := e.Shutdown(shutdownCtx); err != nil { + logger.Error("Error shutting down API server", zap.Error(err)) + } + + // Stop AMQP consumer + if err := consumer.Stop(); err != nil { + logger.Error("Error stopping AMQP consumer", zap.Error(err)) + } + + // Wait for batch writer to finish draining with timeout + logger.Info("Waiting for batch writer to finish draining...") + done := make(chan struct{}) + go func() { + tsClient.Wait() + close(done) + }() + + select { + case <-done: + logger.Info("Batch writer finished draining successfully") + case 
<-shutdownCtx.Done(): + logger.Warn("Batch writer drain timeout exceeded, some data may be lost") + } + + logger.Info("Service shutdown complete") + return nil +} + +func loadAlertProcessors(logger *zap.Logger, path string) { + if strings.TrimSpace(path) == "" { + return + } + + processors, err := alertregistry.LoadFromConfig(path) + if err != nil { + logger.Warn("Failed to load alert processors from config", zap.Error(err), zap.String("path", path)) + return + } + + alertregistry.ReplaceAll(processors) + logger.Info("Loaded alert processors from config", zap.String("path", path), zap.Int("count", len(processors))) +} diff --git a/configs/alerts_processors.yaml b/configs/alerts_processors.yaml new file mode 100644 index 0000000..b342834 --- /dev/null +++ b/configs/alerts_processors.yaml @@ -0,0 +1,13 @@ +processors: + - category: water_depth + value_key: water_depth + unit: cm + state_predicate: "s.state ~ '^-?[0-9]+(\\\\.[0-9]+)?$'" + default_caution: 10 + default_warning: 30 + default_critical: 60 + messages: + safe: "Water level is safe" + caution: "Water is rising quickly" + warning: "Flooding" + critical: "Flood risk" diff --git a/configs/config.yaml b/configs/config.yaml new file mode 100644 index 0000000..cbb8152 --- /dev/null +++ b/configs/config.yaml @@ -0,0 +1,35 @@ +# Server Configuration +server: + log_level: "info" # debug, info, warn, error + api_port: 8080 # API server port (includes health endpoints) + alerts_processors_path: "configs/alerts_processors.yaml" + +# AMQP/RabbitMQ Configuration +amqp: + broker_url: "" # Set via environment variable AMQP_BROKER_URL + consumer_tag: "telemetry-service" + prefetch_count: 100 + allowed_vhosts: [] # Empty means process all vhosts + reconnect_delay: "5s" + +# Organization Events Configuration +org_events: + exchange: "org.events" # Topic exchange for org events + queue: "telemetry.org.events.queue" # Telemetry's queue for org events + routing_key: "org.#" # Listen to all org events + consumer_tag: 
"telemetry-org-events" + +# TimescaleDB Configuration +db: + name: "spacedf_telemetry" + username: "postgres" + password: "postgres" + host: "localhost" + port: 5437 + batch_size: 1000 # Number of locations to batch before writing + flush_interval: "1s" # Maximum time between batch writes + max_connections: 25 # Maximum number of open database connections + max_idle_conns: 5 # Maximum number of idle connections in the pool + +psql: + dsn: "postgres://postgres:postgres@localhost:5437/spacedf_telemetry?sslmode=disable" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..7f73ce5 --- /dev/null +++ b/go.mod @@ -0,0 +1,53 @@ +module github.com/Space-DF/telemetry-service + +go 1.24.0 + +require ( + github.com/aarondl/opt v0.0.0-20250607033636-982744e1bd65 + github.com/amacneil/dbmate/v2 v2.28.0 + github.com/google/uuid v1.6.0 + github.com/jaswdr/faker/v2 v2.9.0 + github.com/joho/godotenv v1.5.1 + github.com/labstack/echo/v4 v4.13.4 + github.com/lib/pq v1.10.9 + github.com/rabbitmq/amqp091-go v1.10.0 + github.com/spf13/viper v1.21.0 + github.com/stephenafamo/bob v0.41.1 + github.com/urfave/cli/v2 v2.27.7 + go.uber.org/zap v1.27.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect +) + +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/labstack/gommon v0.4.2 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/qdm12/reprint v0.0.0-20200326205758-722754a53494 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect 
+ github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/stephenafamo/scan v0.7.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect + github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.42.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/time v0.12.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..8fee928 --- /dev/null +++ b/go.sum @@ -0,0 +1,219 @@ +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/aarondl/opt v0.0.0-20250607033636-982744e1bd65 h1:lbdPe4LBNmNDzeQFwNhEc88w90841qv737MI4+aXSYU= +github.com/aarondl/opt v0.0.0-20250607033636-982744e1bd65/go.mod h1:+xKBXrTAUOvrDXO5PRwIr4E1wciHY3Glgl+6OkCXknU= +github.com/amacneil/dbmate/v2 v2.28.0 h1:4fAKHjp1k7yY5Mjn4pBm765qPMTs1hd1a2hV0t8pFas= +github.com/amacneil/dbmate/v2 v2.28.0/go.mod h1:aFMv3X21dCZr3AMJVAYG1ft4/2ylcqrId2o8eqFBVmQ= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= 
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= +github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jaswdr/faker/v2 v2.9.0 h1:Sqqpp+pxduDO+MGOhYE3UHtI9Sowt9j95f8h8nVvips= +github.com/jaswdr/faker/v2 v2.9.0/go.mod h1:jZq+qzNQr8/P+5fHd9t3txe2GNPnthrTfohtnJ7B+68= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.13.4 h1:oTZZW+T3s9gAu5L8vmzihV7/lkXGZuITzTQkTEhcXEA= +github.com/labstack/echo/v4 v4.13.4/go.mod h1:g63b33BZ5vZzcIUF8AtRH40DrTlXnx4UMC8rBdndmjQ= +github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0= +github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 
h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 
h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= +github.com/pganalyze/pg_query_go/v6 v6.1.0/go.mod h1:nvTHIuoud6e1SfrUaFwHqT0i4b5Nr+1rPWVds3B5+50= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/qdm12/reprint v0.0.0-20200326205758-722754a53494 h1:wSmWgpuccqS2IOfmYrbRiUgv+g37W5suLLLxwwniTSc= +github.com/qdm12/reprint v0.0.0-20200326205758-722754a53494/go.mod h1:yipyliwI08eQ6XwDm1fEwKPdF/xdbkiHtrU+1Hg+vc4= +github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= +github.com/rabbitmq/amqp091-go v1.10.0/go.mod 
h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= +github.com/shirou/gopsutil/v4 v4.25.5 h1:rtd9piuSMGeU8g1RMXjZs9y9luK5BwtnG7dZaQUJAsc= +github.com/shirou/gopsutil/v4 v4.25.5/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stephenafamo/bob v0.41.1 h1:xcRPuRMCwtZZ9tS4JIVbZ5Erdm5Dy5dIvbS5kivwPpA= +github.com/stephenafamo/bob v0.41.1/go.mod 
h1:8l55917DM36gF518Iz1MHjLds7KGAfkitJfxISYlth8= +github.com/stephenafamo/fakedb v0.0.0-20221230081958-0b86f816ed97 h1:XItoZNmhOih06TC02jK7l3wlpZ0XT/sPQYutDcGOQjg= +github.com/stephenafamo/fakedb v0.0.0-20221230081958-0b86f816ed97/go.mod h1:bM3Vmw1IakoaXocHmMIGgJFYob0vuK+CFWiJHQvz0jQ= +github.com/stephenafamo/scan v0.7.0 h1:lfFiD9H5+n4AdK3qNzXQjj2M3NfTOpmWBIA39NwB94c= +github.com/stephenafamo/scan v0.7.0/go.mod h1:FhIUJ8pLNyex36xGFiazDJJ5Xry0UkAi+RkWRrEcRMg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.38.0 h1:d7uEapLcv2P8AvH8ahLqDMMxda2W9gQN1nRbHS28HBw= +github.com/testcontainers/testcontainers-go v0.38.0/go.mod h1:C52c9MoHpWO+C4aqmgSU+hxlR5jlEayWtgYrb8Pzz1w= +github.com/testcontainers/testcontainers-go/modules/postgres v0.38.0 h1:KFdx9A0yF94K70T6ibSuvgkQQeX1xKlZVF3hEagXEtY= +github.com/testcontainers/testcontainers-go/modules/postgres v0.38.0/go.mod h1:T/QRECND6N6tAKMxF1Za+G2tpwnGEHcODzHRsgIpw9M= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= 
+github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU= +github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/wasilibs/go-pgquery v0.0.0-20250409022910-10ac41983c07 h1:mJdDDPblDfPe7z7go8Dvv1AJQDI3eQ/5xith3q2mFlo= +github.com/wasilibs/go-pgquery v0.0.0-20250409022910-10ac41983c07/go.mod h1:Ak17IJ037caFp4jpCw/iQQ7/W74Sqpb1YuKJU6HTKfM= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 h1:OvLBa8SqJnZ6P+mjlzc2K7PM22rRUPE1x32G9DTPrC4= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52/go.mod h1:jMeV4Vpbi8osrE/pKUxRZkVaA0EX7NZN0A9/oRzgpgY= +github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg= +github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenizh/go-capturer v0.0.0-20211219060012-52ea6c8fed04 h1:qXafrlZL1WsJW5OokjraLLRURHiw0OzKHD/RNdspp4w= +github.com/zenizh/go-capturer v0.0.0-20211219060012-52ea6c8fed04/go.mod h1:FiwNQxz6hGoNFBC4nIx+CxZhI3nne5RmIOlT/MXcSD4= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod 
h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/alerts/registry/generic_numeric.go b/internal/alerts/registry/generic_numeric.go new file mode 100644 index 0000000..c9355be --- /dev/null +++ b/internal/alerts/registry/generic_numeric.go @@ -0,0 +1,65 @@ +package registry + +import ( + "fmt" + "strconv" +) + +// GenericNumericProcessor is a config-driven processor for numeric sensors. 
// GenericNumericProcessor is a config-driven alert processor for numeric
// sensor readings. Thresholds escalate safe -> caution -> warning -> critical.
type GenericNumericProcessor struct {
	CategoryValue   string            // sensor category this processor handles
	ValueKeyValue   string            // payload key holding the numeric value
	UnitValue       string            // display unit used in messages
	StatePred       string            // SQL predicate gating alert state; empty means always
	DefaultCaution  float64           // default caution threshold
	DefaultWarn     float64           // default warning threshold
	DefaultCritical float64           // default critical threshold
	Messages        map[string]string // level -> message template
}

// Category returns the sensor category handled by this processor.
func (p *GenericNumericProcessor) Category() string { return p.CategoryValue }

// DefaultCautionThreshold returns the configured caution threshold.
func (p *GenericNumericProcessor) DefaultCautionThreshold() float64 { return p.DefaultCaution }

// DefaultWarningThreshold returns the configured warning threshold.
func (p *GenericNumericProcessor) DefaultWarningThreshold() float64 { return p.DefaultWarn }

// DefaultCriticalThreshold returns the configured critical threshold.
func (p *GenericNumericProcessor) DefaultCriticalThreshold() float64 {
	return p.DefaultCritical
}

// Unit returns the display unit for values of this category.
func (p *GenericNumericProcessor) Unit() string { return p.UnitValue }

// ValueKey returns the payload key from which the value is parsed.
func (p *GenericNumericProcessor) ValueKey() string { return p.ValueKeyValue }

// StatePredicate returns the configured predicate, defaulting to "TRUE"
// (always applicable) when none was configured.
func (p *GenericNumericProcessor) StatePredicate() string {
	if p.StatePred == "" {
		return "TRUE"
	}
	return p.StatePred
}

// ParseValue converts the raw string payload into a float64.
func (p *GenericNumericProcessor) ParseValue(raw string) (float64, error) {
	return strconv.ParseFloat(raw, 64)
}

// DetermineLevel classifies value against the given thresholds.
//
// Fixed: the original mixed strict (>) and inclusive (>=) comparisons, so a
// value exactly at the critical threshold was reported as "warning" and a
// value exactly at the caution threshold as "safe". Thresholds are now
// inclusive at every level: value >= threshold reaches that level.
func (p *GenericNumericProcessor) DetermineLevel(value, cautionThreshold, warningThreshold, criticalThreshold float64) string {
	switch {
	case value >= criticalThreshold:
		return "critical"
	case value >= warningThreshold:
		return "warning"
	case value >= cautionThreshold:
		return "caution"
	default:
		return "safe"
	}
}

// DetermineType mirrors DetermineLevel; level and type share the same scale.
func (p *GenericNumericProcessor) DetermineType(value, cautionThreshold, warningThreshold, criticalThreshold float64) string {
	return p.DetermineLevel(value, cautionThreshold, warningThreshold, criticalThreshold)
}

// GenerateMessage returns the configured template for level. When no template
// exists it deliberately returns a diagnostic string instead of a fallback,
// so missing templates surface during debugging.
func (p *GenericNumericProcessor) GenerateMessage(level string, value float64) string {
	if p.Messages != nil {
		if msg, ok := p.Messages[level]; ok {
			return msg
		}
	}
	// No fallback on purpose to surface missing templates during debugging
	return fmt.Sprintf("missing message template for level %s (value %.2f %s)", level, value, p.UnitValue)
}

// diff --git a/internal/alerts/registry/loader.go
b/internal/alerts/registry/loader.go new file mode 100644 index 0000000..4d96be0 --- /dev/null +++ b/internal/alerts/registry/loader.go @@ -0,0 +1,81 @@ +package registry + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +// processorConfig mirrors the YAML structure for a processor entry. +type processorConfig struct { + Category string `yaml:"category"` + ValueKey string `yaml:"value_key"` + Unit string `yaml:"unit"` + StatePredicate string `yaml:"state_predicate"` + DefaultCaution float64 `yaml:"default_caution"` + DefaultWarning float64 `yaml:"default_warning"` + DefaultCritical float64 `yaml:"default_critical"` + Messages map[string]string `yaml:"messages"` +} + +type processorsConfig struct { + Processors []processorConfig `yaml:"processors"` +} + +// LoadFromConfig loads processors from a YAML file and returns them keyed by lowercased category. +func LoadFromConfig(path string) (map[string]Processor, error) { + data, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, fmt.Errorf("read processors config: %w", err) + } + + var cfg processorsConfig + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("parse processors config: %w", err) + } + + result := make(map[string]Processor, len(cfg.Processors)) + for _, p := range cfg.Processors { + category := strings.TrimSpace(p.Category) + if category == "" { + return nil, fmt.Errorf("processor category is required") + } + + valueKey := strings.TrimSpace(p.ValueKey) + if valueKey == "" { + valueKey = "value" + } + + result[strings.ToLower(category)] = &GenericNumericProcessor{ + CategoryValue: category, + ValueKeyValue: valueKey, + UnitValue: p.Unit, + StatePred: p.StatePredicate, + DefaultCaution: p.DefaultCaution, + DefaultWarn: p.DefaultWarning, + DefaultCritical: p.DefaultCritical, + Messages: p.Messages, + } + } + + return result, nil +} + +// RegisterFromConfig merges processors from YAML into the global registry, overriding existing 
categories. +func RegisterFromConfig(path string) error { + processors, err := LoadFromConfig(path) + if err != nil { + return err + } + + globalRegistry.mu.Lock() + defer globalRegistry.mu.Unlock() + + for category, processor := range processors { + globalRegistry.processors[category] = processor + } + return nil +} diff --git a/internal/alerts/registry/registry.go b/internal/alerts/registry/registry.go new file mode 100644 index 0000000..f008be1 --- /dev/null +++ b/internal/alerts/registry/registry.go @@ -0,0 +1,72 @@ +package registry + +import ( + "fmt" + "strings" + "sync" +) + +// Processor defines behavior for building alerts for a given category/device type. +type Processor interface { + Category() string + DefaultCautionThreshold() float64 + DefaultWarningThreshold() float64 + DefaultCriticalThreshold() float64 + Unit() string + ValueKey() string + StatePredicate() string + ParseValue(raw string) (float64, error) + DetermineLevel(value, cautionThreshold, warningThreshold, criticalThreshold float64) string + DetermineType(value, cautionThreshold, warningThreshold, criticalThreshold float64) string + GenerateMessage(level string, value float64) string +} + +type registry struct { + mu sync.RWMutex + processors map[string]Processor +} + +var globalRegistry = ®istry{processors: make(map[string]Processor)} + +// Register adds a processor; categories are stored lowercased. +func Register(p Processor) error { + if p == nil { + return fmt.Errorf("processor cannot be nil") + } + + cat := strings.ToLower(p.Category()) + + globalRegistry.mu.Lock() + defer globalRegistry.mu.Unlock() + + if _, exists := globalRegistry.processors[cat]; exists { + return fmt.Errorf("processor for category %s already registered", cat) + } + + globalRegistry.processors[cat] = p + return nil +} + +// Get returns a processor by category (case-insensitive). 
+func Get(category string) (Processor, bool) { + globalRegistry.mu.RLock() + defer globalRegistry.mu.RUnlock() + + p, ok := globalRegistry.processors[strings.ToLower(category)] + return p, ok +} + +// ReplaceAll swaps the registry contents with the provided processors map. +func ReplaceAll(processors map[string]Processor) { + globalRegistry.mu.Lock() + defer globalRegistry.mu.Unlock() + globalRegistry.processors = copyProcessors(processors) +} + +func copyProcessors(in map[string]Processor) map[string]Processor { + out := make(map[string]Processor, len(in)) + for k, v := range in { + out[k] = v + } + return out +} diff --git a/internal/amqp/multi-tenant/consumer.go b/internal/amqp/multi-tenant/consumer.go new file mode 100644 index 0000000..c72301e --- /dev/null +++ b/internal/amqp/multi-tenant/consumer.go @@ -0,0 +1,749 @@ +package amqp + +import ( + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/Space-DF/telemetry-service/internal/config" + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + amqp "github.com/rabbitmq/amqp091-go" + "go.uber.org/zap" +) + +// MessageProcessor processes device location messages +type MessageProcessor interface { + ProcessMessage(context context.Context, msg *models.DeviceLocationMessage) error + ProcessTelemetry(ctx context.Context, payload *models.TelemetryPayload) error + OnOrgCreated(ctx context.Context, orgSlug string) error + OnOrgDeleted(ctx context.Context, orgSlug string) error +} + +// TenantConsumer represents a consumer for a specific tenant +type TenantConsumer struct { + OrgSlug string + Vhost string + QueueName string + Exchange string + ConsumerTag string + Channel *amqp.Channel + Cancel context.CancelFunc +} + +type pooledConnection struct { + conn *amqp.Connection + refCount int +} + +// SchemaInitializer handles database schema initialization +type 
SchemaInitializer interface { + CreateSchemaAndTables(ctx context.Context, orgSlug string) error +} + +// MultiTenantConsumer handles message consumption for multiple tenants +type MultiTenantConsumer struct { + config config.AMQP + orgEventsConfig config.OrgEvents + orgEventsConn *amqp.Connection + orgEventsChannel *amqp.Channel + processor MessageProcessor + schemaInit SchemaInitializer + logger *zap.Logger + done chan bool + instanceID string // Unique identifier for this instance + + tenantMu sync.RWMutex + tenantConsumers map[string]*TenantConsumer + + vhostMu sync.Mutex + vhostConnections map[string]*pooledConnection +} + +// generateInstanceID creates a unique identifier for this service instance +func generateInstanceID() string { + // Try to use hostname first + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + + // Generate random bytes for uniqueness + randomBytes := make([]byte, 4) + if _, err := rand.Read(randomBytes); err != nil { + // Fallback to timestamp if random fails + return fmt.Sprintf("%s-%d", hostname, time.Now().UnixNano()) + } + + return fmt.Sprintf("%s-%s", hostname, hex.EncodeToString(randomBytes)) +} + +// NewMultiTenantConsumer creates a new multi-tenant consumer +func NewMultiTenantConsumer(cfg config.AMQP, orgEventsCfg config.OrgEvents, processor MessageProcessor, schemaInit SchemaInitializer, logger *zap.Logger) *MultiTenantConsumer { + instanceID := generateInstanceID() + + logger.Info("Creating multi-tenant consumer with unique instance ID", + zap.String("instance_id", instanceID)) + + return &MultiTenantConsumer{ + config: cfg, + orgEventsConfig: orgEventsCfg, + processor: processor, + schemaInit: schemaInit, + logger: logger, + done: make(chan bool, 1), + instanceID: instanceID, + tenantConsumers: make(map[string]*TenantConsumer), + vhostConnections: make(map[string]*pooledConnection), + } +} + +// Connect establishes connection to AMQP broker +func (c *MultiTenantConsumer) Connect() error { + var err 
error + + // Connect to AMQP broker for org events + c.orgEventsConn, err = amqp.Dial(c.config.BrokerURL) + if err != nil { + return fmt.Errorf("failed to connect to AMQP broker: %w", err) + } + + // Create separate channel for org events + c.orgEventsChannel, err = c.orgEventsConn.Channel() + if err != nil { + return fmt.Errorf("failed to open org events channel: %w", err) + } + + return nil +} + +// getOrgEventsQueueName returns a unique queue name for this instance +func (c *MultiTenantConsumer) getOrgEventsQueueName() string { + // Each instance gets its own queue to ensure all instances receive all org events + return fmt.Sprintf("%s.%s", c.orgEventsConfig.Queue, c.instanceID) +} + +// Start begins consuming messages with multi-tenant support +func (c *MultiTenantConsumer) Start(ctx context.Context) error { + c.logger.Info("Starting telemetry service with multi-tenant architecture", + zap.String("instance_id", c.instanceID), + zap.String("org_events_queue", c.getOrgEventsQueueName())) + c.logger.Info("Waiting for organization events to discover active tenants") + + // Start listening to organization events + go func() { + if err := c.listenToOrgEvents(ctx); err != nil { + c.logger.Error("Org events listener error", zap.Error(err)) + } + }() + + // Send bootstrap discovery request after a small delay + go func() { + time.Sleep(2 * time.Second) + if err := c.sendDiscoveryRequest(ctx); err != nil { + c.logger.Error("Failed to send discovery request", zap.Error(err)) + } + }() + + // Wait for context cancellation or done signal + select { + case <-ctx.Done(): + c.logger.Info("Context cancelled, stopping multi-tenant consumer") + c.stopAllConsumers() + case <-c.done: + c.logger.Info("Multi-tenant consumer stopped") + c.stopAllConsumers() + } + + return nil +} + +// sendDiscoveryRequest sends a request to console service to get all active orgs +func (c *MultiTenantConsumer) sendDiscoveryRequest(ctx context.Context) error { + c.logger.Info("Sending discovery request 
to console service for existing organizations", + zap.String("reply_to", c.getOrgEventsQueueName())) + + request := models.OrgDiscoveryRequest{ + EventType: models.OrgDiscoveryReq, + EventID: fmt.Sprintf("discovery-%s-%d", c.instanceID, time.Now().Unix()), + Timestamp: time.Now(), + ServiceName: "telemetry-service", + ReplyTo: c.getOrgEventsQueueName(), // Use instance-specific queue + } + + body, err := json.Marshal(request) + if err != nil { + return fmt.Errorf("failed to marshal discovery request: %w", err) + } + + err = c.orgEventsChannel.PublishWithContext( + ctx, + c.orgEventsConfig.Exchange, + "org.discovery.request", + false, + false, + amqp.Publishing{ + ContentType: "application/json", + Body: body, + Timestamp: time.Now(), + }, + ) + + if err != nil { + return fmt.Errorf("failed to publish discovery request: %w", err) + } + + c.logger.Info("Discovery request sent successfully") + return nil +} + +func (c *MultiTenantConsumer) ensureOrgEventsTopology() error { + if err := c.orgEventsChannel.ExchangeDeclare( + c.orgEventsConfig.Exchange, + "topic", + true, + false, + false, + false, + nil, + ); err != nil { + return fmt.Errorf("exchange declare failed: %w", err) + } + + // Use instance-specific queue name + queueName := c.getOrgEventsQueueName() + + // Queue should auto-delete when this instance disconnects + // Set exclusive to true so each instance gets its own queue + if _, err := c.orgEventsChannel.QueueDeclare( + queueName, + false, // non-durable (will be recreated on restart) + true, // auto-delete when unused + true, // exclusive to this connection + false, // no-wait + nil, // arguments + ); err != nil { + return fmt.Errorf("queue declare failed for %s: %w", queueName, err) + } + + if err := c.orgEventsChannel.QueueBind( + queueName, + c.orgEventsConfig.RoutingKey, + c.orgEventsConfig.Exchange, + false, + nil, + ); err != nil { + return fmt.Errorf("queue bind failed for %s: %w", queueName, err) + } + + c.logger.Info("Org events topology 
configured", + zap.String("queue", queueName), + zap.String("exchange", c.orgEventsConfig.Exchange), + zap.String("routing_key", c.orgEventsConfig.RoutingKey)) + + return nil +} + +func (c *MultiTenantConsumer) shouldHandleVhost(vhost string) bool { + if len(c.config.AllowedVHosts) == 0 { + return true + } + + for _, allowed := range c.config.AllowedVHosts { + if allowed == vhost { + return true + } + } + + return false +} + +func (c *MultiTenantConsumer) buildVhostURL(vhost string) (string, error) { + baseURL := c.config.BrokerURL + parsed, err := url.Parse(baseURL) + if err != nil { + return "", fmt.Errorf("failed to parse broker url: %w", err) + } + + if vhost == "" { + parsed.Path = "/" + parsed.RawPath = "" + } else { + encoded := "/" + url.PathEscape(vhost) + parsed.Path = encoded + parsed.RawPath = encoded + } + + return parsed.String(), nil +} + +func (c *MultiTenantConsumer) getOrCreateVhostConnection(vhost string) (*amqp.Connection, error) { + c.vhostMu.Lock() + defer c.vhostMu.Unlock() + + if pooled, exists := c.vhostConnections[vhost]; exists { + pooled.refCount++ + return pooled.conn, nil + } + + vhostURL, err := c.buildVhostURL(vhost) + if err != nil { + return nil, err + } + + conn, err := amqp.Dial(vhostURL) + if err != nil { + return nil, fmt.Errorf("failed to connect to vhost %s: %w", vhost, err) + } + + c.vhostConnections[vhost] = &pooledConnection{ + conn: conn, + refCount: 1, + } + + return conn, nil +} + +func (c *MultiTenantConsumer) releaseVhostConnection(vhost string) { + c.vhostMu.Lock() + defer c.vhostMu.Unlock() + + pooled, exists := c.vhostConnections[vhost] + if !exists { + return + } + + pooled.refCount-- + if pooled.refCount <= 0 { + _ = pooled.conn.Close() + delete(c.vhostConnections, vhost) + } +} + +func (c *MultiTenantConsumer) makeConsumerTag(orgSlug, vhost string) string { + safeVhost := strings.NewReplacer("/", "_", ".", "_", ":", "_").Replace(vhost) + return fmt.Sprintf("telemetry-%s-%s-%s", orgSlug, safeVhost, c.instanceID) 
+} + +// subscribeToOrganization starts consuming from an organization's queue +func (c *MultiTenantConsumer) subscribeToOrganization(parentCtx context.Context, orgSlug, vhost, queueName, exchange string) error { + // Create default queue and exchange names if not provided + if queueName == "" { + queueName = fmt.Sprintf("%s.telemetry.queue", orgSlug) + } + if exchange == "" { + exchange = fmt.Sprintf("%s.exchange", orgSlug) + } + + if !c.shouldHandleVhost(vhost) { + c.logger.Info("Skipping subscription; vhost not assigned to this telemetry service", + zap.String("org", orgSlug), + zap.String("vhost", vhost)) + return nil + } + + c.tenantMu.RLock() + if _, exists := c.tenantConsumers[orgSlug]; exists { + c.tenantMu.RUnlock() + c.logger.Info("Subscription already active", + zap.String("org", orgSlug), + zap.String("vhost", vhost)) + return nil + } + c.tenantMu.RUnlock() + + conn, err := c.getOrCreateVhostConnection(vhost) + if err != nil { + return err + } + + channel, err := conn.Channel() + if err != nil { + c.releaseVhostConnection(vhost) + return fmt.Errorf("failed to open channel for vhost %s: %w", vhost, err) + } + + if err := channel.Qos(c.config.PrefetchCount, 0, false); err != nil { + _ = channel.Close() + c.releaseVhostConnection(vhost) + return fmt.Errorf("failed to set QoS for vhost %s: %w", vhost, err) + } + + // Declare the queue - will be idempotent if it already exists with same params + // The queue is shared across all telemetry instances for load balancing + queue, err := channel.QueueDeclare( + queueName, + true, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + if err != nil { + _ = channel.Close() + c.releaseVhostConnection(vhost) + return fmt.Errorf("failed to declare queue '%s' in vhost '%s': %w", queueName, vhost, err) + } + + // Bind queue to exchange with the routing key pattern + routingKey := fmt.Sprintf("tenant.%s.transformed.telemetry.device.location", orgSlug) + if err := 
channel.QueueBind( + queue.Name, + routingKey, + exchange, + false, + nil, + ); err != nil { + _ = channel.Close() + c.releaseVhostConnection(vhost) + return fmt.Errorf("failed to bind queue '%s' to exchange '%s': %w", queue.Name, exchange, err) + } + + consumerTag := c.makeConsumerTag(orgSlug, vhost) + + messages, err := channel.Consume( + queue.Name, + consumerTag, + false, + false, + false, + false, + nil, + ) + if err != nil { + _ = channel.Close() + c.releaseVhostConnection(vhost) + return fmt.Errorf("failed to start consuming from queue '%s' in vhost '%s': %w", queueName, vhost, err) + } + + tenantCtx, cancel := context.WithCancel(parentCtx) + consumer := &TenantConsumer{ + OrgSlug: orgSlug, + Vhost: vhost, + QueueName: queueName, + Exchange: exchange, + ConsumerTag: consumerTag, + Channel: channel, + Cancel: cancel, + } + + c.tenantMu.Lock() + c.tenantConsumers[orgSlug] = consumer + c.tenantMu.Unlock() + + go c.processTenantMessages(tenantCtx, consumer, messages) + + c.logger.Info("Started consuming from organization", + zap.String("org", orgSlug), + zap.String("vhost", vhost), + zap.String("queue", queueName), + zap.String("routing_key", routingKey)) + return nil +} + +// unsubscribeFromOrganization stops consuming from an organization's queue +func (c *MultiTenantConsumer) unsubscribeFromOrganization(orgSlug string) { + c.tenantMu.Lock() + consumer, exists := c.tenantConsumers[orgSlug] + if exists { + delete(c.tenantConsumers, orgSlug) + } + c.tenantMu.Unlock() + + if !exists || consumer == nil { + return + } + + consumer.Cancel() // Cancel the context to stop the goroutine + + if consumer.Channel != nil { + _ = consumer.Channel.Cancel(consumer.ConsumerTag, false) + _ = consumer.Channel.Close() + } + + c.releaseVhostConnection(consumer.Vhost) + + c.logger.Info("Stopped consuming from organization", + zap.String("org", consumer.OrgSlug), + zap.String("vhost", consumer.Vhost), + zap.String("queue", consumer.QueueName)) +} + +// stopAllConsumers stops all 
active tenant consumers +func (c *MultiTenantConsumer) stopAllConsumers() { + c.logger.Info("Stopping all tenant consumers") + c.tenantMu.Lock() + for slug, consumer := range c.tenantConsumers { + if consumer != nil { + consumer.Cancel() + if consumer.Channel != nil { + _ = consumer.Channel.Cancel(consumer.ConsumerTag, false) + _ = consumer.Channel.Close() + } + c.releaseVhostConnection(consumer.Vhost) + c.logger.Info("Stopped consuming from organization", + zap.String("org", consumer.OrgSlug), + zap.String("vhost", consumer.Vhost)) + } + delete(c.tenantConsumers, slug) + } + c.tenantMu.Unlock() + + c.vhostMu.Lock() + for vhost, pooled := range c.vhostConnections { + if pooled != nil && pooled.conn != nil { + _ = pooled.conn.Close() + } + delete(c.vhostConnections, vhost) + } + c.vhostMu.Unlock() + c.logger.Info("All tenant consumers stopped") +} + +// listenToOrgEvents listens for organization lifecycle events +func (c *MultiTenantConsumer) listenToOrgEvents(ctx context.Context) error { + var ( + messages <-chan amqp.Delivery + err error + attempt = 1 + ) + + for { + if err = c.ensureOrgEventsTopology(); err == nil { + queueName := c.getOrgEventsQueueName() + consumerTag := fmt.Sprintf("%s-%s", c.orgEventsConfig.ConsumerTag, c.instanceID) + + messages, err = c.orgEventsChannel.Consume( + queueName, + consumerTag, + false, // manual ack for reliability + false, + false, + false, + nil, + ) + if err == nil { + c.logger.Info("Started consuming org events", + zap.String("queue", queueName), + zap.String("consumer_tag", consumerTag)) + break + } + } + + backoff := time.Duration(attempt) + if backoff > 10 { + backoff = 10 + } + + c.logger.Warn("Telemetry org events setup retry", + zap.Int("attempt", attempt), + zap.Duration("next_retry", backoff*time.Second), + zap.Error(err)) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(backoff * time.Second): + } + + if attempt < 10 { + attempt++ + } + } + + // Process org events + for { + select { + case 
<-ctx.Done(): + return nil + case msg, ok := <-messages: + if !ok { + return nil + } + + c.logger.Debug("Received org event", zap.String("routing_key", msg.RoutingKey)) + + if err := c.handleOrgEvent(ctx, msg); err != nil { + c.logger.Error("Error handling org event", zap.Error(err)) + _ = msg.Nack(false, true) // Requeue + } else { + _ = msg.Ack(false) + } + } + } +} + +// handleOrgEvent processes organization lifecycle events +func (c *MultiTenantConsumer) handleOrgEvent(ctx context.Context, msg amqp.Delivery) error { + c.logger.Info("Processing org event", zap.String("routing_key", msg.RoutingKey)) + + var event models.OrgEvent + + if err := json.Unmarshal(msg.Body, &event); err != nil { + return fmt.Errorf("failed to unmarshal org event: %w", err) + } + + c.logger.Info("Processing org event details", + zap.String("event_type", string(event.EventType)), + zap.String("org", event.Payload.Slug)) + + orgSlug := event.Payload.Slug + vhost := event.Payload.Vhost + + switch event.EventType { + case models.OrgCreated: + // New org created - subscribe to its queue + if vhost == "" { + c.logger.Warn("Org created event missing vhost", zap.String("org", orgSlug)) + return nil + } + // Use empty strings to let subscribeToOrganization create default names + if err := c.subscribeToOrganization(ctx, orgSlug, vhost, "", ""); err != nil { + return err + } + + // Ask processor to ensure any per-organization setup (e.g., DB schema) + if c.processor != nil { + if err := c.processor.OnOrgCreated(ctx, orgSlug); err != nil { + c.logger.Error("Processor failed to handle org creation", + zap.String("org", orgSlug), + zap.Error(err)) + return err + } + } + + return nil + + case models.OrgDeactivated, models.OrgDeleted: + // Org deleted/deactivated - unsubscribe + c.unsubscribeFromOrganization(orgSlug) + + // If this is a deletion event, notify processor to perform cleanup + if event.EventType == models.OrgDeleted { + if c.processor != nil { + if err := c.processor.OnOrgDeleted(ctx, 
orgSlug); err != nil { + c.logger.Error("Processor failed to handle org deletion", + zap.String("org", orgSlug), + zap.Error(err)) + return err + } + } + } + + return nil + + default: + c.logger.Warn("Unknown event type", zap.String("event_type", string(event.EventType))) + return nil + } +} + +// processTenantMessages processes messages for a specific tenant +func (c *MultiTenantConsumer) processTenantMessages(ctx context.Context, tenant *TenantConsumer, messages <-chan amqp.Delivery) { + c.logger.Info("Processing messages for organization", zap.String("org", tenant.OrgSlug)) + + for { + select { + case <-ctx.Done(): + c.logger.Info("Stopping message processing for organization", zap.String("org", tenant.OrgSlug)) + return + + case msg, ok := <-messages: + if !ok { + c.logger.Info("Message channel closed for organization", zap.String("org", tenant.OrgSlug)) + return + } + + c.logger.Debug("Received message from organization", + zap.String("org", tenant.OrgSlug), + zap.String("routing_key", msg.RoutingKey)) + + orgCtx := timescaledb.ContextWithOrg(ctx, tenant.OrgSlug) + + // First try telemetry payload (entities). + var telemetry models.TelemetryPayload + if err := json.Unmarshal(msg.Body, &telemetry); err == nil && len(telemetry.Entities) > 0 { + // Fill org if missing. + if telemetry.Organization == "" { + telemetry.Organization = tenant.OrgSlug + } + if telemetry.SpaceSlug == "" { + telemetry.SpaceSlug = tenant.OrgSlug + } + + if err := c.processor.ProcessTelemetry(orgCtx, &telemetry); err != nil { + c.logger.Error("Failed to process telemetry payload", + zap.Error(err), + zap.String("org", tenant.OrgSlug)) + if nackErr := msg.Nack(false, true); nackErr != nil { + c.logger.Error("Failed to nack message", zap.Error(nackErr)) + } + continue + } + + if ackErr := msg.Ack(false); ackErr != nil { + c.logger.Error("Failed to ack message", zap.Error(ackErr)) + } + continue + } + + // Fallback to legacy device location message. 
+ var deviceMsg models.DeviceLocationMessage + if err := json.Unmarshal(msg.Body, &deviceMsg); err != nil { + c.logger.Error("Failed to unmarshal message", + zap.Error(err), + zap.String("org", tenant.OrgSlug)) + if nackErr := msg.Nack(false, false); nackErr != nil { + c.logger.Error("Failed to nack bad message", zap.Error(nackErr)) + } + continue + } + + if err := c.processor.ProcessMessage(orgCtx, &deviceMsg); err != nil { + c.logger.Error("Failed to process message", + zap.Error(err), + zap.String("org", tenant.OrgSlug)) + if errors.Is(err, timescaledb.ErrLocationDroppedTimeout) { + c.logger.Warn("Location dropped due to timeout", + zap.String("org", tenant.OrgSlug)) + if nackErr := msg.Nack(false, true); nackErr != nil { + c.logger.Error("Failed to nack timeout message", zap.Error(nackErr)) + } + } + } else { + if ackErr := msg.Ack(false); ackErr != nil { + c.logger.Error("Failed to ack message", zap.Error(ackErr)) + } + } + } + } +} + +// Stop gracefully stops the consumer +func (c *MultiTenantConsumer) Stop() error { + close(c.done) + c.stopAllConsumers() + + if c.orgEventsChannel != nil { + _ = c.orgEventsChannel.Close() + } + + if c.orgEventsConn != nil { + _ = c.orgEventsConn.Close() + } + + return nil +} + +// IsHealthy checks if the consumer is healthy +func (c *MultiTenantConsumer) IsHealthy() bool { + return c.orgEventsConn != nil && !c.orgEventsConn.IsClosed() +} diff --git a/internal/api/alerts/handler.go b/internal/api/alerts/handler.go new file mode 100644 index 0000000..37183b8 --- /dev/null +++ b/internal/api/alerts/handler.go @@ -0,0 +1,162 @@ +package alerts + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + alertregistry "github.com/Space-DF/telemetry-service/internal/alerts/registry" + "github.com/Space-DF/telemetry-service/internal/api/common" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +type Handler struct { + logger *zap.Logger + tsClient 
*timescaledb.Client +} + +func NewHandler(logger *zap.Logger, tsClient *timescaledb.Client) *Handler { + return &Handler{ + logger: logger, + tsClient: tsClient, + } +} + +type Alert struct { + ID string `json:"id"` + Type string `json:"type"` + Level string `json:"level"` + Message string `json:"message"` + EntityID string `json:"entity_id"` + EntityName string `json:"entity_name"` + DeviceID string `json:"device_id"` + SpaceSlug string `json:"space_slug"` + Location *LocationInfo `json:"location,omitempty"` + WaterDepth float64 `json:"water_depth"` + Unit string `json:"unit"` + Threshold *ThresholdInfo `json:"threshold"` + ReportedAt time.Time `json:"reported_at"` + Attributes map[string]interface{} `json:"attributes,omitempty"` +} + +type LocationInfo struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Address string `json:"address,omitempty"` +} + +type ThresholdInfo struct { + Warning float64 `json:"warning"` + Critical float64 `json:"critical"` +} + +type AlertsResponse struct { + Results []interface{} `json:"results"` + TotalCount int `json:"total_count"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +// GetAlerts returns alerts based on water level thresholds +func (h *Handler) GetAlerts(c echo.Context) error { + // Resolve organization from hostname or X-Organization header + orgSlug := common.ResolveOrgFromRequest(c) + if orgSlug == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + h.logger.Info("Getting alerts", zap.String("org", orgSlug)) + + // Parse query parameters + spaceSlug, spaceErr := common.ResolveSpaceSlugFromRequest(c) + if spaceErr != nil { + return spaceErr + } + deviceID := c.QueryParam("device_id") + category := c.QueryParam("category") + + processor, ok := alertregistry.Get(category) + if !ok { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "unsupported 
category", + }) + } + startDate := strings.TrimSpace(c.QueryParam("start_date")) + endDate := strings.TrimSpace(c.QueryParam("end_date")) + + if deviceID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "device_id is required", + }) + } + // Pagination + page, _ := strconv.Atoi(c.QueryParam("page")) + if page < 1 { + page = 1 + } + pageSize, _ := strconv.Atoi(c.QueryParam("page_size")) + if pageSize < 1 || pageSize > 100 { + pageSize = 20 + } + + cautionThreshold := processor.DefaultCautionThreshold() + warningThreshold := processor.DefaultWarningThreshold() + criticalThreshold := processor.DefaultCriticalThreshold() + + if ct := c.QueryParam("caution_threshold"); ct != "" { + if val, err := strconv.ParseFloat(ct, 64); err == nil { + cautionThreshold = val + } + } + if wt := c.QueryParam("warning_threshold"); wt != "" { + if val, err := strconv.ParseFloat(wt, 64); err == nil { + warningThreshold = val + } + } + if crt := c.QueryParam("critical_threshold"); crt != "" { + if val, err := strconv.ParseFloat(crt, 64); err == nil { + criticalThreshold = val + } + } + + alerts, totalCount, err := h.tsClient.GetAlerts( + c.Request().Context(), + orgSlug, + category, + spaceSlug, + deviceID, + startDate, + endDate, + cautionThreshold, + warningThreshold, + criticalThreshold, + page, + pageSize, + ) + + if err != nil { + switch { + case errors.Is(err, timescaledb.ErrDateRequired): + return c.JSON(http.StatusBadRequest, map[string]string{"error": "start_date and end_date are required"}) + case errors.Is(err, timescaledb.ErrInvalidDateFormat): + return c.JSON(http.StatusBadRequest, map[string]string{"error": "invalid date format, expected YYYY-MM-DD"}) + } + h.logger.Error("Failed to get alerts", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve alerts"}) + } + + response := AlertsResponse{ + Results: alerts, + TotalCount: totalCount, + Page: page, + PageSize: pageSize, + } + + return 
c.JSON(http.StatusOK, response) +} diff --git a/internal/api/alerts/routes.go b/internal/api/alerts/routes.go new file mode 100644 index 0000000..bc02709 --- /dev/null +++ b/internal/api/alerts/routes.go @@ -0,0 +1,13 @@ +package alerts + +import ( + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(group *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + handler := NewHandler(logger, tsClient) + + group.GET("/alerts", handler.GetAlerts) +} diff --git a/internal/api/common/request.go b/internal/api/common/request.go new file mode 100644 index 0000000..96387d8 --- /dev/null +++ b/internal/api/common/request.go @@ -0,0 +1,41 @@ +package common + +import ( + "net/http" + "strings" + + "github.com/labstack/echo/v4" +) + +// ResolveOrgFromRequest extracts organization from hostname (like test.localhost) +// or from X-Organization header with header taking priority. +// Format: {org_slug}.{domain} where domain is configured in settings +func ResolveOrgFromRequest(c echo.Context) string { + // First try X-Organization header (for explicit control) + if orgHeader := c.Request().Header.Get("X-Organization"); orgHeader != "" { + return orgHeader + } + + // Extract from hostname: org_slug.domain + hostname := c.Request().Host + if hostname == "" { + return "" + } + + parts := strings.Split(hostname, ".") + if len(parts) > 0 && parts[0] != "" { + return parts[0] + } + + return "" +} + +// ResolveSpaceSlugFromRequest extracts space_slug from X-Space header. +// Returns an error if the header is missing. 
+func ResolveSpaceSlugFromRequest(c echo.Context) (string, error) { + spaceSlug := c.Request().Header.Get("X-Space") + if spaceSlug == "" { + return "", echo.NewHTTPError(http.StatusBadRequest, "X-Space header is required") + } + return spaceSlug, nil +} diff --git a/internal/api/data/handler.go b/internal/api/data/handler.go new file mode 100644 index 0000000..df681cd --- /dev/null +++ b/internal/api/data/handler.go @@ -0,0 +1,63 @@ +package data + +import ( + "net/http" + + "github.com/Space-DF/telemetry-service/internal/api/common" + models "github.com/Space-DF/telemetry-service/internal/api/data/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func getDeviceProperties(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + var r models.GetDevicePropertiesRequest + + // Bind query parameters + if err := c.Bind(&r); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "invalid query parameters", + }) + } + + if r.DeviceID == "" || r.SpaceSlug == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "device_id and space_slug are required", + }) + } + + // Resolve organization from hostname or X-Organization header + orgToUse := common.ResolveOrgFromRequest(c) + + // Log which org will be used for DB scoping + logger.Info("Fetching device properties", + zap.String("org_used", orgToUse), + zap.String("space_slug", r.SpaceSlug), + zap.String("device_id", r.DeviceID), + ) + + // Build context with org for DB search_path + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + + // Query all device properties + props, err := tsClient.GetDeviceProperties(ctx, r.DeviceID, r.SpaceSlug) + if err != nil { + logger.Error("Failed to query device properties", + zap.Error(err), + zap.String("device_id", r.DeviceID), + zap.String("space_slug", r.SpaceSlug), + ) + return 
c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to retrieve device properties", + }) + } + + // Return empty dict if no properties found + if props == nil { + props = make(map[string]interface{}) + } + + return c.JSON(http.StatusOK, props) + } +} diff --git a/internal/api/data/models/models.go b/internal/api/data/models/models.go new file mode 100644 index 0000000..338d9d5 --- /dev/null +++ b/internal/api/data/models/models.go @@ -0,0 +1,8 @@ +package models + +type GetDevicePropertiesRequest struct { + DeviceID string `query:"device_id" validate:"required"` + SpaceSlug string `query:"space_slug" validate:"required"` +} + +type DevicePropertiesResponse map[string]interface{} diff --git a/internal/api/data/router.go b/internal/api/data/router.go new file mode 100644 index 0000000..19943af --- /dev/null +++ b/internal/api/data/router.go @@ -0,0 +1,12 @@ +package data + +import ( + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + group := e.Group("/data") + group.GET("/latest", getDeviceProperties(logger, tsClient)) +} diff --git a/internal/api/entities/handler.go b/internal/api/entities/handler.go new file mode 100644 index 0000000..cc4af1a --- /dev/null +++ b/internal/api/entities/handler.go @@ -0,0 +1,89 @@ +package entities + +import ( + "net/http" + "strconv" + "strings" + + "github.com/Space-DF/telemetry-service/internal/api/common" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func getEntities(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + // Parse query params + category := c.QueryParam("category") + deviceID := c.QueryParam("device_id") + displayTypes := parseDisplayTypes(c.QueryParam("display_type")) + search := 
strings.TrimSpace(c.QueryParam("search")) + pageStr := c.QueryParam("page") + pageSizeStr := c.QueryParam("page_size") + + // Resolve space slug from X-Space header (required) + spaceSlug, err := common.ResolveSpaceSlugFromRequest(c) + if err != nil { + return err + } + + // defaults + page := 1 + pageSize := 100 + if pageStr != "" { + if p, err := strconv.Atoi(pageStr); err == nil && p > 0 { + page = p + } + } + if pageSizeStr != "" { + if ps, err := strconv.Atoi(pageSizeStr); err == nil && ps > 0 { + pageSize = ps + } + } + + // Resolve organization from hostname or X-Organization header + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + logger.Info("Selecting DB schema for entities request", + zap.String("org_used", orgToUse)) + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + + // Query DB + entities, count, err := tsClient.GetEntities(ctx, spaceSlug, category, deviceID, displayTypes, search, page, pageSize) + if err != nil { + logger.Error("failed to query entities", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to query entities"}) + } + + return c.JSON(http.StatusOK, map[string]interface{}{ + "count": count, + "results": entities, + }) + } +} + +func parseDisplayTypes(param string) []string { + if param == "" { + return nil + } + + parts := strings.Split(param, ",") + j := 0 + for i := range parts { + if trimmed := strings.TrimSpace(parts[i]); trimmed != "" { + parts[j] = trimmed + j++ + } + } + + if j == 0 { + return nil + } + return parts[:j] +} diff --git a/internal/api/entities/router.go b/internal/api/entities/router.go new file mode 100644 index 0000000..dea8f01 --- /dev/null +++ b/internal/api/entities/router.go @@ -0,0 +1,12 @@ +package entities + +import ( + 
"github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + group := e.Group("/entities") + group.GET("", getEntities(logger, tsClient)) +} diff --git a/internal/api/location/handler.go b/internal/api/location/handler.go new file mode 100644 index 0000000..af31e8c --- /dev/null +++ b/internal/api/location/handler.go @@ -0,0 +1,90 @@ +package location + +import ( + "net/http" + + "github.com/Space-DF/telemetry-service/internal/api/common" + models "github.com/Space-DF/telemetry-service/internal/api/location/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +const ( + DefaultLimit = 100 + MaxLimit = 24 * 3600 / 30 * 7 // one week +) + +func getLocationHistory(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + var r models.LocationHistoryRequest + + // Bind query parameters + if err := c.Bind(&r); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "invalid query parameters", + }) + } + req, err := r.Validate() + if err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": err.Error(), + }) + } + + // Handle limit + limit := req.Limit + if limit == 0 { + limit = DefaultLimit + } else if limit < 0 { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "limit must be positive", + }) + } else if limit > MaxLimit { + limit = MaxLimit + } + + // Resolve organization from hostname or X-Organization header + orgToUse := common.ResolveOrgFromRequest(c) + + // Log which org will be used for DB scoping + logger.Info("Selecting DB schema for request", + zap.String("org_used", orgToUse), + zap.String("space_slug", req.SpaceSlug), + ) + + // Build context with org for DB search_path + ctx := 
timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + + // Query database + locations, err := tsClient.GetLocationHistory(ctx, req.DeviceID, req.SpaceSlug, req.Start, req.End, limit) + if err != nil { + logger.Error("Failed to query location history", + zap.Error(err), + zap.String("device_id", req.DeviceID), + zap.String("space_slug", req.SpaceSlug), + ) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to retrieve location history", + }) + } + + // Convert to response format + locationResponses := make([]models.LocationResponse, len(locations)) + for i, loc := range locations { + locationResponses[i] = models.LocationResponse{ + Timestamp: loc.Time, + Latitude: loc.Latitude, + Longitude: loc.Longitude, + DeviceID: loc.DeviceID, + } + } + + response := models.LocationHistoryResponse{ + Count: len(locationResponses), + Locations: locationResponses, + } + + return c.JSON(http.StatusOK, response) + } +} diff --git a/internal/api/location/models/request.go b/internal/api/location/models/request.go new file mode 100644 index 0000000..a470261 --- /dev/null +++ b/internal/api/location/models/request.go @@ -0,0 +1,36 @@ +package models + +import ( + "fmt" + "time" +) + +// LocationHistoryRequest represents query parameters for location history +type LocationHistoryRequest struct { + DeviceID string `query:"device_id" validate:"required"` + SpaceSlug string `query:"space_slug" validate:"required"` + Start time.Time `query:"start"` + End time.Time `query:"end"` + Limit int `query:"limit"` +} + +func (r LocationHistoryRequest) Validate() (*LocationHistoryRequest, error) { + if r.DeviceID == "" { + return nil, fmt.Errorf("device_id is required") + } + + if r.SpaceSlug == "" { + return nil, fmt.Errorf("space_slug is required") + } + if r.Start.IsZero() { + return nil, fmt.Errorf("start time is required") + } + if r.End.IsZero() { + r.End = time.Now().UTC() + } + if r.End.Before(r.Start) { + return nil, fmt.Errorf("end time must be 
after start time") + } + + return &r, nil +} diff --git a/internal/api/location/models/response.go b/internal/api/location/models/response.go new file mode 100644 index 0000000..89fca41 --- /dev/null +++ b/internal/api/location/models/response.go @@ -0,0 +1,24 @@ +package models + +import "time" + +// LocationHistoryResponse represents the response for location history +type LocationHistoryResponse struct { + Count int `json:"count"` + Locations []LocationResponse `json:"locations"` +} + +// LocationResponse represents a single location +type LocationResponse struct { + Timestamp time.Time `json:"timestamp"` + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + DeviceID string `json:"device_id"` +} + +// QueryParamsResponse shows the actual query parameters used +type QueryParamsResponse struct { + Start time.Time `json:"start"` + End time.Time `json:"end"` + Limit int `json:"limit"` +} diff --git a/internal/api/location/router.go b/internal/api/location/router.go new file mode 100644 index 0000000..b887f90 --- /dev/null +++ b/internal/api/location/router.go @@ -0,0 +1,12 @@ +package location + +import ( + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + group := e.Group("/location") // Device location routes + group.GET("/history", getLocationHistory(logger, tsClient)) +} diff --git a/internal/api/router.go b/internal/api/router.go new file mode 100644 index 0000000..e4dc72b --- /dev/null +++ b/internal/api/router.go @@ -0,0 +1,22 @@ +package api + +import ( + "github.com/Space-DF/telemetry-service/internal/api/alerts" + "github.com/Space-DF/telemetry-service/internal/api/data" + "github.com/Space-DF/telemetry-service/internal/api/entities" + "github.com/Space-DF/telemetry-service/internal/api/location" + "github.com/Space-DF/telemetry-service/internal/api/widget" + 
"github.com/Space-DF/telemetry-service/internal/config" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func Setup(cfg *config.Config, e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + group := e.Group("/v1") + location.RegisterRoutes(group, logger, tsClient) + entities.RegisterRoutes(group, logger, tsClient) + alerts.RegisterRoutes(group, logger, tsClient) + widget.RegisterRoutes(group, logger, tsClient) + data.RegisterRoutes(group, logger, tsClient) +} diff --git a/internal/api/widget/device_types.go b/internal/api/widget/device_types.go new file mode 100644 index 0000000..f9a1ced --- /dev/null +++ b/internal/api/widget/device_types.go @@ -0,0 +1,96 @@ +package widget + +import ( + "context" + "net/http" + + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func gaugeHandler(c echo.Context, logger *zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + value, unitOfMeasurement, err := tsClient.GetLatestEntityValue(ctx, req.EntityID) + if err != nil { + logger.Error("failed to get latest entity value", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve value"}) + } + + return c.JSON(http.StatusOK, GaugeValueResponse{ + Value: value, + UnitOfMeasurement: unitOfMeasurement, + }) +} + +func switchHandler(c echo.Context, logger *zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + boolValue, err := tsClient.GetLatestEntityBoolValue(ctx, req.EntityID) + if err != nil { + logger.Error("failed to get latest entity bool value", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve value"}) + } + + return c.JSON(http.StatusOK, SwitchValueResponse{Value: boolValue}) +} + +func chartHandler(c echo.Context, logger 
*zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + dataPoints, err := tsClient.GetAggregatedEntityData( + ctx, req.EntityID, *req.StartTime, *req.EndTime, + ) + if err != nil { + logger.Error("failed to get aggregated data", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve chart data"}) + } + + chartPoints := make([]ChartDataPoint, len(dataPoints)) + for i, dp := range dataPoints { + chartPoints[i] = ChartDataPoint{Timestamp: dp.Timestamp, Value: dp.Value} + } + + return c.JSON(http.StatusOK, ChartDataResponse{Data: chartPoints}) +} + +func histogramHandler(c echo.Context, logger *zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + buckets, err := tsClient.GetHistogramData( + ctx, req.EntityID, *req.StartTime, *req.EndTime, + ) + if err != nil { + logger.Error("failed to get histogram data", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve histogram data"}) + } + + histogramBuckets := make([]HistogramBucket, len(buckets)) + for i, b := range buckets { + histogramBuckets[i] = HistogramBucket{Bucket: b.Bucket, Count: b.Count, Value: b.Value} + } + + return c.JSON(http.StatusOK, HistogramDataResponse{Data: histogramBuckets}) +} + +func tableHandler(c echo.Context, logger *zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + tableData, columns, err := tsClient.GetTableData(ctx, req.EntityID, *req.StartTime, *req.EndTime) + if err != nil { + logger.Error("failed to get table data", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve table data"}) + } + + rows := make([]TableRow, len(tableData)) + for i, row := range tableData { + rows[i] = TableRow{Timestamp: row.Timestamp, Values: row.Values} + } + + return c.JSON(http.StatusOK, TableDataResponse{Columns: 
columns, Data: rows}) +} +func mapHandler(c echo.Context, logger *zap.Logger, tsClient *timescaledb.Client, ctx context.Context, req WidgetDataRequest) error { + latitude, longitude, err := tsClient.GetLatestEntityLocation(ctx, req.EntityID) + if err != nil { + logger.Error("failed to get entity location", zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{"error": "failed to retrieve location data"}) + } + + return c.JSON(http.StatusOK, MapDataResponse{ + Coordinate: Coordinate{ + Latitude: latitude, + Longitude: longitude, + }, + }) +} diff --git a/internal/api/widget/handler.go b/internal/api/widget/handler.go new file mode 100644 index 0000000..78d51b1 --- /dev/null +++ b/internal/api/widget/handler.go @@ -0,0 +1,67 @@ +package widget + +import ( + "fmt" + "net/http" + + "github.com/Space-DF/telemetry-service/internal/api/common" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func getWidgetData(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + var req WidgetDataRequest + + // Get entity_id from URL path + req.EntityID = c.Param("entity_id") + if req.EntityID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{"error": "entity_id is required"}) + } + + // Bind remaining query parameters + if err := c.Bind(&req); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{"error": "invalid query parameters"}) + } + + if req.DisplayType == "" { + return c.JSON(http.StatusBadRequest, map[string]string{"error": "display_type is required"}) + } + + orgSlug := common.ResolveOrgFromRequest(c) + if orgSlug == "" { + return c.JSON(http.StatusBadRequest, map[string]string{"error": "organization not found"}) + } + + // Validate time-range requirements + if req.DisplayType == DisplayTypeChart || req.DisplayType == DisplayTypeHistogram || req.DisplayType == DisplayTypeTable { + if 
req.StartTime == nil || req.EndTime == nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": fmt.Sprintf("%s requires start_time and end_time", req.DisplayType), + }) + } + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgSlug) + + switch req.DisplayType { + case DisplayTypeGauge, DisplayTypeSlider, DisplayTypeValue: + return gaugeHandler(c, logger, tsClient, ctx, req) + case DisplayTypeSwitch: + return switchHandler(c, logger, tsClient, ctx, req) + case DisplayTypeChart: + return chartHandler(c, logger, tsClient, ctx, req) + case DisplayTypeHistogram: + return histogramHandler(c, logger, tsClient, ctx, req) + case DisplayTypeTable: + return tableHandler(c, logger, tsClient, ctx, req) + case DisplayTypeMap: + return mapHandler(c, logger, tsClient, ctx, req) + default: + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": fmt.Sprintf("unknown display_type: %s", req.DisplayType), + }) + } + } +} diff --git a/internal/api/widget/models.go b/internal/api/widget/models.go new file mode 100644 index 0000000..5279325 --- /dev/null +++ b/internal/api/widget/models.go @@ -0,0 +1,83 @@ +package widget + +import "time" + +// DisplayType constants +const ( + DisplayTypeGauge = "gauge" + DisplayTypeSlider = "slider" + DisplayTypeValue = "value" + DisplayTypeChart = "chart" + DisplayTypeHistogram = "histogram" + DisplayTypeTable = "table" + DisplayTypeSwitch = "switch" + DisplayTypeMap = "map" +) + +// WidgetDataRequest represents widget data fetch request +type WidgetDataRequest struct { + EntityID string `query:"entity_id"` + OrgSlug string `query:"org_slug"` + SpaceSlug string `query:"space_slug"` + DeviceID string `query:"device_id"` + StartTime *time.Time `query:"start_time"` + EndTime *time.Time `query:"end_time"` + DisplayType string `query:"display_type"` +} + +// GaugeValueResponse for gauge, slider, value types +type GaugeValueResponse struct { + Value float64 `json:"value"` + UnitOfMeasurement string 
`json:"unit_of_measurement,omitempty"` +} + +// SwitchValueResponse for switch type +type SwitchValueResponse struct { + Value bool `json:"value"` +} + +// ChartDataPoint for chart/histogram data +type ChartDataPoint struct { + Timestamp time.Time `json:"timestamp"` + Value float64 `json:"value"` +} + +// ChartDataResponse for chart and histogram types +type ChartDataResponse struct { + Data []ChartDataPoint `json:"data"` +} + +// TableRow for table type +type TableRow struct { + Timestamp time.Time `json:"timestamp"` + Values map[string]interface{} `json:"values"` +} + +// TableDataResponse for table type +type TableDataResponse struct { + Columns []string `json:"columns"` + Data []TableRow `json:"data"` +} + +// HistogramBucket for histogram type +type HistogramBucket struct { + Bucket string `json:"bucket"` + Count int64 `json:"count"` + Value float64 `json:"value"` +} + +// HistogramDataResponse for histogram type +type HistogramDataResponse struct { + Data []HistogramBucket `json:"data"` +} + +// Coordinate for map type +type Coordinate struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` +} + +// MapDataResponse for map type +type MapDataResponse struct { + Coordinate Coordinate `json:"coordinate"` +} diff --git a/internal/api/widget/router.go b/internal/api/widget/router.go new file mode 100644 index 0000000..88c6bc0 --- /dev/null +++ b/internal/api/widget/router.go @@ -0,0 +1,12 @@ +package widget + +import ( + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { + group := e.Group("/widget") + group.GET("/data/:entity_id", getWidgetData(logger, tsClient)) +} diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..4850eda --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,130 @@ +package config + +import ( + "errors" + 
"fmt" + "net/url" + "strings" + "time" + + "github.com/joho/godotenv" + "github.com/spf13/viper" +) + +// Config represents the service configuration +type Config struct { + Server Server `mapstructure:"server"` + AMQP AMQP `mapstructure:"amqp"` + OrgEvents OrgEvents `mapstructure:"org_events"` + Db Db `mapstructure:"db"` +} + +// OrgEvents contains organization events configuration +type OrgEvents struct { + Exchange string `mapstructure:"exchange"` + Queue string `mapstructure:"queue"` + RoutingKey string `mapstructure:"routing_key"` + ConsumerTag string `mapstructure:"consumer_tag"` +} + +// Server contains server configuration +type Server struct { + LogLevel string `mapstructure:"log_level"` + APIPort int `mapstructure:"api_port"` + AlertsProcessorsCfg string `mapstructure:"alerts_processors_path"` +} + +// AMQP contains RabbitMQ configuration +type AMQP struct { + BrokerURL string `mapstructure:"broker_url"` + ConsumerTag string `mapstructure:"consumer_tag"` + PrefetchCount int `mapstructure:"prefetch_count"` + AllowedVHosts []string `mapstructure:"allowed_vhosts"` + ReconnectDelay time.Duration `mapstructure:"reconnect_delay"` +} + +// Db contains Db configuration +type Db struct { + Name string `mapstructure:"name"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + Host string `mapstructure:"host"` + Port int `mapstructure:"port"` + BatchSize int `mapstructure:"batch_size"` + FlushInterval time.Duration `mapstructure:"flush_interval"` + MaxConnections int `mapstructure:"max_connections"` + MaxIdleConns int `mapstructure:"max_idle_conns"` +} + +// LoadConfig loads configuration from file and environment variables +func LoadConfig() (*Config, error) { + _ = godotenv.Load(".env") + + var cfg Config + vp := viper.New() + + vp.SetConfigFile("configs/config.yaml") + vp.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + vp.AutomaticEnv() + + if err := vp.ReadInConfig(); err != nil { + var configFileNotFoundError 
viper.ConfigFileNotFoundError + if !errors.As(err, &configFileNotFoundError) { + return nil, fmt.Errorf("error reading config file: %w", err) + } + } + + if err := vp.Unmarshal(&cfg); err != nil { + return nil, fmt.Errorf("unmarshal error: %w", err) + } + + if err := validateConfig(&cfg); err != nil { + return nil, fmt.Errorf("invalid configuration: %w", err) + } + + return &cfg, nil +} + +// validateConfig validates the configuration values +func validateConfig(cfg *Config) error { + // Validate AMQP broker URL if provided + if cfg.AMQP.BrokerURL != "" { + if _, err := url.Parse(cfg.AMQP.BrokerURL); err != nil { + return fmt.Errorf("invalid AMQP broker URL: %w", err) + } + } else { + return fmt.Errorf("AMQP broker URL is required") + } + + // Validate port numbers + if cfg.Server.APIPort <= 0 || cfg.Server.APIPort > 65535 { + return fmt.Errorf("invalid API port: %d", cfg.Server.APIPort) + } + + // Validate batch settings + if cfg.Db.BatchSize <= 0 { + return fmt.Errorf("batch size must be positive: %d", cfg.Db.BatchSize) + } + if cfg.Db.FlushInterval <= 0 { + return fmt.Errorf("flush interval must be positive: %v", cfg.Db.FlushInterval) + } + + // Validate connection pool settings + if cfg.Db.MaxConnections <= 0 { + return fmt.Errorf("max connections must be positive: %d", cfg.Db.MaxConnections) + } + if cfg.Db.MaxIdleConns < 0 { + return fmt.Errorf("max idle connections must be non-negative: %d", cfg.Db.MaxIdleConns) + } + if cfg.Db.MaxIdleConns > cfg.Db.MaxConnections { + return fmt.Errorf("max idle connections (%d) cannot exceed max connections (%d)", + cfg.Db.MaxIdleConns, cfg.Db.MaxConnections) + } + + // Validate prefetch count + if cfg.AMQP.PrefetchCount <= 0 { + return fmt.Errorf("prefetch count must be positive: %d", cfg.AMQP.PrefetchCount) + } + + return nil +} diff --git a/internal/health/handler.go b/internal/health/handler.go new file mode 100644 index 0000000..519cb3e --- /dev/null +++ b/internal/health/handler.go @@ -0,0 +1,96 @@ +package health + 
+import ( + "fmt" + "net/http" + + "github.com/Space-DF/telemetry-service/internal/health/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +// handleHealth returns the overall health status +func handleHealth(healthChecker HealthChecker, tsClient *timescaledb.Client, logger *zap.Logger) echo.HandlerFunc { + return func(c echo.Context) error { + response := models.NewHealthResponse() + healthy := true + + // Check AMQP connection + if healthChecker != nil { + if healthChecker.IsHealthy() { + response.Checks["rabbitmq"] = models.ComponentCheck{ + Status: "healthy", + Message: "Connected to RabbitMQ", + } + } else { + healthy = false + response.Checks["rabbitmq"] = models.ComponentCheck{ + Status: "unhealthy", + Message: "Not connected to RabbitMQ", + } + } + } + + // Check TimescaleDB connection + if tsClient != nil { + if err := tsClient.HealthCheck(); err != nil { + healthy = false + response.Checks["timescaledb"] = models.ComponentCheck{ + Status: "unhealthy", + Message: fmt.Sprintf("Database connection failed: %v", err), + } + } else { + response.Checks["timescaledb"] = models.ComponentCheck{ + Status: "healthy", + Message: "Connected to TimescaleDB", + } + } + } + + if !healthy { + response.Status = "unhealthy" + return c.JSON(http.StatusServiceUnavailable, response) + } + + return c.JSON(http.StatusOK, response) + } +} + +// handleReady returns the readiness status +func handleReady(healthChecker HealthChecker, tsClient *timescaledb.Client, logger *zap.Logger) echo.HandlerFunc { + return func(c echo.Context) error { + ready := true + message := "Service is ready" + + // Check AMQP + if healthChecker != nil && !healthChecker.IsHealthy() { + ready = false + message = "AMQP not connected" + } + + // Check TimescaleDB + if tsClient != nil { + if err := tsClient.HealthCheck(); err != nil { + ready = false + message = "TimescaleDB not connected" + } + } + + response := 
models.NewReadyResponse(ready, message) + + if !ready { + return c.JSON(http.StatusServiceUnavailable, response) + } + + return c.JSON(http.StatusOK, response) + } +} + +// handleLive returns the liveness status +func handleLive(logger *zap.Logger) echo.HandlerFunc { + return func(c echo.Context) error { + response := models.NewLiveResponse() + return c.JSON(http.StatusOK, response) + } +} diff --git a/internal/health/models/response.go b/internal/health/models/response.go new file mode 100644 index 0000000..c032adb --- /dev/null +++ b/internal/health/models/response.go @@ -0,0 +1,55 @@ +package models + +import "time" + +// HealthResponse represents the overall health status +type HealthResponse struct { + Status string `json:"status"` + Timestamp string `json:"timestamp"` + Checks map[string]ComponentCheck `json:"checks"` +} + +// ComponentCheck represents the health status of a single component +type ComponentCheck struct { + Status string `json:"status"` + Message string `json:"message"` +} + +// ReadyResponse represents the readiness status +type ReadyResponse struct { + Ready bool `json:"ready"` + Message string `json:"message"` + Timestamp string `json:"timestamp"` +} + +// LiveResponse represents the liveness status +type LiveResponse struct { + Live bool `json:"live"` + Timestamp string `json:"timestamp"` +} + +// NewHealthResponse creates a new health response +func NewHealthResponse() *HealthResponse { + return &HealthResponse{ + Status: "healthy", + Timestamp: time.Now().UTC().Format(time.RFC3339), + Checks: make(map[string]ComponentCheck), + } +} + +// NewReadyResponse creates a new ready response +func NewReadyResponse(ready bool, message string) *ReadyResponse { + return &ReadyResponse{ + Ready: ready, + Message: message, + Timestamp: time.Now().UTC().Format(time.RFC3339), + } +} + +// NewLiveResponse creates a new live response +func NewLiveResponse() *LiveResponse { + return &LiveResponse{ + Live: true, + Timestamp: 
time.Now().UTC().Format(time.RFC3339), + } +} diff --git a/internal/health/router.go b/internal/health/router.go new file mode 100644 index 0000000..6973852 --- /dev/null +++ b/internal/health/router.go @@ -0,0 +1,19 @@ +package health + +import ( + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +// HealthChecker interface for health checking +type HealthChecker interface { + IsHealthy() bool +} + +// Setup registers health check routes +func Setup(e *echo.Group, healthChecker HealthChecker, tsClient *timescaledb.Client, logger *zap.Logger) { + e.GET("/health", handleHealth(healthChecker, tsClient, logger)) + e.GET("/ready", handleReady(healthChecker, tsClient, logger)) + e.GET("/live", handleLive(logger)) +} diff --git a/internal/models/device.go b/internal/models/device.go new file mode 100644 index 0000000..4f1b581 --- /dev/null +++ b/internal/models/device.go @@ -0,0 +1,43 @@ +package models + +// DeviceLocationMessage represents the transformed device location message from RabbitMQ +type DeviceLocationMessage struct { + DeviceID string `json:"device_id"` + Location LocationCoordinates `json:"location"` + Timestamp string `json:"timestamp"` + Space string `json:"space_slug"` + Organization string `json:"organization"` + Source string `json:"source"` + Metadata map[string]any `json:"metadata"` +} + +// LocationCoordinates represents geographic coordinates with accuracy +type LocationCoordinates struct { + Latitude float64 `json:"latitude"` + Longitude float64 `json:"longitude"` + Accuracy float64 `json:"accuracy"` + Direction *string `json:"direction,omitempty"` +} + +// ToTelemetryPayload converts DeviceLocationMessage to TelemetryPayload for entity_states storage +func (m *DeviceLocationMessage) ToTelemetryPayload() *TelemetryPayload { + // Skip if device_id is unknown or empty + if m.DeviceID == "" || m.DeviceID == "unknown" { + return nil + } + + // Skip if we don't have valid coordinates 
+ if m.Location.Latitude == 0 && m.Location.Longitude == 0 { + return nil + } + + return &TelemetryPayload{ + DeviceID: m.DeviceID, + Organization: m.Organization, + SpaceSlug: m.Space, + Entities: []TelemetryEntity{}, + Timestamp: m.Timestamp, + Source: m.Source, + Metadata: m.Metadata, + } +} diff --git a/internal/models/org_event.go b/internal/models/org_event.go new file mode 100644 index 0000000..f6855ca --- /dev/null +++ b/internal/models/org_event.go @@ -0,0 +1,71 @@ +package models + +import "time" + +// OrgEventType represents the type of organization event +type OrgEventType string + +const ( + // OrgCreated is emitted when a new organization is created + OrgCreated OrgEventType = "org.created" + // OrgUpdated is emitted when an organization is updated + OrgUpdated OrgEventType = "org.updated" + // OrgDeleted is emitted when an organization is deleted + OrgDeleted OrgEventType = "org.deleted" + // OrgDeactivated is emitted when an organization is deactivated + OrgDeactivated OrgEventType = "org.deactivated" + // OrgActivated is emitted when an organization is activated + OrgActivated OrgEventType = "org.activated" + // OrgDiscoveryReq is emitted when requesting active orgs + OrgDiscoveryReq OrgEventType = "org.discovery.request" + // OrgDiscoveryResp is the response with active orgs + OrgDiscoveryResp OrgEventType = "org.discovery.response" +) + +// OrgEvent represents an organization lifecycle event +type OrgEvent struct { + EventType OrgEventType `json:"event_type"` + EventID string `json:"event_id"` + Timestamp time.Time `json:"timestamp"` + Payload OrgEventPayload `json:"payload"` +} + +// OrgEventPayload contains organization details +type OrgEventPayload struct { + ID string `json:"id"` + Slug string `json:"slug"` + Name string `json:"name"` + Vhost string `json:"vhost,omitempty"` + Exchange string `json:"exchange,omitempty"` + TransformerQueue string `json:"transformer_queue,omitempty"` + TelemetryQueue string `json:"telemetry_queue,omitempty"` + 
IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` +} + +// OrgDiscoveryRequest is sent to discover active organizations +type OrgDiscoveryRequest struct { + EventType OrgEventType `json:"event_type"` + EventID string `json:"event_id"` + Timestamp time.Time `json:"timestamp"` + ServiceName string `json:"service_name"` + ReplyTo string `json:"reply_to"` +} + +// OrgDiscoveryResponse contains all active organizations +type OrgDiscoveryResponse struct { + EventType OrgEventType `json:"event_type"` + EventID string `json:"event_id"` + Timestamp time.Time `json:"timestamp"` + Spaces []OrgInfo `json:"spaces"` + TotalCount int `json:"total_count"` +} + +// OrgInfo contains basic organization information +type OrgInfo struct { + Slug string `json:"slug"` + Name string `json:"name"` + Vhost string `json:"vhost"` + IsActive bool `json:"is_active"` +} diff --git a/internal/models/telemetry.go b/internal/models/telemetry.go new file mode 100644 index 0000000..308c8eb --- /dev/null +++ b/internal/models/telemetry.go @@ -0,0 +1,38 @@ +package models + +// TelemetryPayload represents the entity-centric telemetry emitted by transformer-service. +type TelemetryPayload struct { + Organization string `json:"organization"` + DeviceEUI string `json:"device_eui"` + DeviceID string `json:"device_id,omitempty"` + SpaceSlug string `json:"space_slug,omitempty"` + DeviceInfo TelemetryDevice `json:"device_info"` + Entities []TelemetryEntity `json:"entities"` + Timestamp string `json:"timestamp"` + Source string `json:"source"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +// TelemetryDevice holds basic device metadata. 
+type TelemetryDevice struct { + Identifiers []string `json:"identifiers"` + Name string `json:"name"` + Manufacturer string `json:"manufacturer"` + Model string `json:"model"` + ModelID string `json:"model_id"` +} + +// TelemetryEntity describes a single entity in telemetry payloads. +type TelemetryEntity struct { + UniqueID string `json:"unique_id"` + EntityID string `json:"entity_id"` + EntityType string `json:"entity_type"` + DeviceClass string `json:"device_class,omitempty"` + Name string `json:"name"` + State any `json:"state"` + DisplayType []string `json:"display_type,omitempty"` + Attributes map[string]any `json:"attributes,omitempty"` + UnitOfMeas string `json:"unit_of_measurement,omitempty"` + Icon string `json:"icon,omitempty"` + Timestamp string `json:"timestamp"` +} diff --git a/internal/services/processor.go b/internal/services/processor.go new file mode 100644 index 0000000..a6ea578 --- /dev/null +++ b/internal/services/processor.go @@ -0,0 +1,206 @@ +package services + +import ( + "context" + "fmt" + "sync/atomic" + + "github.com/Space-DF/telemetry-service/internal/models" + timescaledb "github.com/Space-DF/telemetry-service/internal/timescaledb" + "go.uber.org/zap" +) + +// LocationProcessor processes device location messages and stores them in Psql +type LocationProcessor struct { + tsClient *timescaledb.Client + logger *zap.Logger + + // Counters for monitoring + processedCount atomic.Int64 + errorCount atomic.Int64 + droppedCount atomic.Int64 +} + +// NewLocationProcessor creates a new location processor +func NewLocationProcessor(tsClient *timescaledb.Client, logger *zap.Logger) *LocationProcessor { + return &LocationProcessor{ + tsClient: tsClient, + logger: logger, + } +} + +// ProcessMessage processes a device location message +func (p *LocationProcessor) ProcessMessage(ctx context.Context, msg *models.DeviceLocationMessage) error { + p.logger.Debug("Processing device location message", + zap.String("device_id", msg.DeviceID), + 
zap.String("space", msg.Space), + zap.Float64("latitude", msg.Location.Latitude), + zap.Float64("longitude", msg.Location.Longitude), + ) + + // Validate message + if err := p.validateMessage(msg); err != nil { + p.logger.Warn("Invalid message, dropping", + zap.Error(err), + ) + p.droppedCount.Add(1) + return nil // Don't retry invalid messages + } + + // Skip if we don't have valid coordinates + if msg.Location.Latitude == 0 && msg.Location.Longitude == 0 { + p.logger.Debug("Skipping message with no coordinates", + zap.String("device_id", msg.DeviceID), + ) + p.droppedCount.Add(1) + return nil + } + + // Convert to telemetry payload and save to entity_states + payload := msg.ToTelemetryPayload() + if payload == nil { + p.logger.Debug("No payload generated from message (unknown device or no coordinates)", + zap.String("device_id", msg.DeviceID), + ) + p.droppedCount.Add(1) + return nil + } + + // Save telemetry payload to entity_states + if err := p.tsClient.SaveTelemetryPayload(ctx, payload); err != nil { + p.errorCount.Add(1) + p.logger.Error("Failed to save telemetry payload", + zap.Error(err), + zap.String("device_id", msg.DeviceID), + ) + return fmt.Errorf("failed to save telemetry payload: %w", err) + } + + p.processedCount.Add(1) + + p.logger.Debug("Successfully processed message", + zap.String("device_id", msg.DeviceID), + zap.Int64("total_processed", p.processedCount.Load()), + ) + + // Log progress every 100 messages + if p.processedCount.Load()%100 == 0 { + p.logger.Info("Processing progress", + zap.Int64("processed", p.processedCount.Load()), + zap.Int64("errors", p.errorCount.Load()), + zap.Int64("dropped", p.droppedCount.Load()), + ) + } + + return nil +} + +// validateMessage validates a device location message +func (p *LocationProcessor) validateMessage(msg *models.DeviceLocationMessage) error { + // Check required fields + if msg.Space == "" { + return fmt.Errorf("missing space") + } + + if msg.Timestamp == "" { + return fmt.Errorf("missing 
timestamp") + } + + // Validate location data if present + if msg.Location.Latitude == 0 && msg.Location.Longitude == 0 { + // Location is optional, but if one coordinate is set, both should be + if msg.Location.Latitude != 0 || msg.Location.Longitude != 0 { + return fmt.Errorf("incomplete location data") + } + } else { + // Validate coordinate ranges + if msg.Location.Latitude < -90 || msg.Location.Latitude > 90 { + return fmt.Errorf("invalid latitude: %f", msg.Location.Latitude) + } + + if msg.Location.Longitude < -180 || msg.Location.Longitude > 180 { + return fmt.Errorf("invalid longitude: %f", msg.Location.Longitude) + } + } + + return nil +} + +// ProcessTelemetry processes the entity-centric telemetry payload and stores it in the entities schema. +func (p *LocationProcessor) ProcessTelemetry(ctx context.Context, payload *models.TelemetryPayload) error { + if payload == nil { + return fmt.Errorf("nil telemetry payload") + } + + if p.tsClient == nil { + return fmt.Errorf("timescaledb client is not initialized") + } + + p.logger.Info("Processing telemetry payload", + zap.String("org", payload.Organization), + zap.String("device_id", payload.DeviceID), + zap.Int("entities", len(payload.Entities)), + ) + + if err := p.tsClient.SaveTelemetryPayload(ctx, payload); err != nil { + p.logger.Error("Failed to persist telemetry payload", zap.Error(err)) + return err + } + + return nil +} + +// GetStats returns processor statistics +func (p *LocationProcessor) GetStats() map[string]interface{} { + return map[string]interface{}{ + "processed_count": p.processedCount.Load(), + "error_count": p.errorCount.Load(), + "dropped_count": p.droppedCount.Load(), + } +} + +// OnOrgCreated is invoked when a new organization is created. It ensures any +// organization-specific setup is performed, such as creating a dedicated DB schema. 
+func (p *LocationProcessor) OnOrgCreated(ctx context.Context, orgSlug string) error { + p.logger.Info("Handling org created in processor", zap.String("org", orgSlug)) + + if orgSlug == "" { + return fmt.Errorf("empty org slug") + } + + if p.tsClient == nil { + return fmt.Errorf("timescaledb client is not initialized") + } + + if err := p.tsClient.CreateSchemaAndTables(ctx, orgSlug); err != nil { + p.logger.Error("Failed to create org schema/tables", zap.String("org", orgSlug), zap.Error(err)) + return err + } + + p.logger.Info("Organization schema created/ensured", zap.String("org", orgSlug)) + return nil +} + +// OnOrgDeleted is invoked when an organization is deleted. It performs +// cleanup for organization-specific resources such as dropping the DB schema. +func (p *LocationProcessor) OnOrgDeleted(ctx context.Context, orgSlug string) error { + p.logger.Info("Handling org deleted in processor", zap.String("org", orgSlug)) + + if orgSlug == "" { + return fmt.Errorf("empty org slug") + } + + if p.tsClient == nil { + return fmt.Errorf("timescaledb client is not initialized") + } + + // Attempt to drop the schema. This is destructive and should only be + // called after ensuring no active processing remains for the org. + if err := p.tsClient.DropSchema(ctx, orgSlug); err != nil { + p.logger.Error("Failed to drop org schema", zap.String("org", orgSlug), zap.Error(err)) + return err + } + + p.logger.Info("Organization schema dropped", zap.String("org", orgSlug)) + return nil +} diff --git a/internal/timescaledb/alerts.go b/internal/timescaledb/alerts.go new file mode 100644 index 0000000..3859eff --- /dev/null +++ b/internal/timescaledb/alerts.go @@ -0,0 +1,229 @@ +package timescaledb + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + alertregistry "github.com/Space-DF/telemetry-service/internal/alerts/registry" + "github.com/stephenafamo/bob" +) + +// GetAlerts retrieves alerts for a device/category within a time range. 
+func (c *Client) GetAlerts(ctx context.Context, orgSlug, category, spaceSlug, deviceID, startStr, endStr string, cautionThreshold, warningThreshold, criticalThreshold float64, page, pageSize int) ([]interface{}, int, error) { + org := orgSlug + if org == "" { + org = orgFromContext(ctx) + } + if org == "" || spaceSlug == "" || deviceID == "" { + return nil, 0, fmt.Errorf("org, space_slug, and device_id are required") + } + + offset := (page - 1) * pageSize + + processor, ok := alertregistry.Get(category) + if !ok { + return nil, 0, fmt.Errorf("unsupported category: %s", category) + } + + // Apply processor defaults if not provided + if cautionThreshold <= 0 { + cautionThreshold = processor.DefaultCautionThreshold() + } + if warningThreshold <= 0 { + warningThreshold = processor.DefaultWarningThreshold() + } + if criticalThreshold <= 0 { + criticalThreshold = processor.DefaultCriticalThreshold() + } + + startAt, endAt, err := buildDateRange(startStr, endStr) + if err != nil { + return nil, 0, err + } + + args := []interface{}{category, spaceSlug, deviceID, startAt, endAt, pageSize, offset} + countArgs := args[:5] + + statePredicate := processor.StatePredicate() + if strings.TrimSpace(statePredicate) == "" { + statePredicate = "TRUE" + } + whereClause := fmt.Sprintf(` + e.is_enabled = true + AND e.category = $1 + AND e.space_slug = $2 + AND e.device_id::text = $3 + AND %s + AND s.reported_at >= $4 + AND s.reported_at < $5 + `, statePredicate) + + query := fmt.Sprintf(alertsQueryTemplate, whereClause) + + // Count query + countQuery := fmt.Sprintf(alertsCountQueryTemplate, whereClause) + + var totalCount int + var results []interface{} + + if err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + // Get total count + row := tx.QueryRowContext(txCtx, countQuery, countArgs...) 
+ if err := row.Scan(&totalCount); err != nil { + return fmt.Errorf("failed to count alerts: %w", err) + } + + // Get alerts + rows, err := tx.QueryContext(txCtx, query, args...) + if err != nil { + return fmt.Errorf("failed to query alerts: %w", err) + } + defer func() { + _ = rows.Close() + }() + + for rows.Next() { + var entityID, entityName, deviceIDVal, spaceSlugVal, state string + var reportedAt time.Time + var latitude, longitude sql.NullFloat64 + + if err := rows.Scan(&entityID, &entityName, &deviceIDVal, &spaceSlugVal, &state, &reportedAt, &latitude, &longitude); err != nil { + return fmt.Errorf("failed to scan alert row: %w", err) + } + + value := 0.0 + if parsed, err := processor.ParseValue(state); err == nil { + value = parsed + } + + levelComputed := processor.DetermineLevel(value, cautionThreshold, warningThreshold, criticalThreshold) + + // Skip safe alerts, only return caution, warning and critical + if levelComputed == "safe" { + continue + } + + alert := map[string]interface{}{ + "id": entityID, + "type": processor.DetermineType(value, cautionThreshold, warningThreshold, criticalThreshold), + "level": levelComputed, + "message": processor.GenerateMessage(levelComputed, value), + "entity_id": entityID, + "entity_name": entityName, + "device_id": deviceIDVal, + "space_slug": spaceSlugVal, + processor.ValueKey(): value, + "unit": processor.Unit(), + "threshold": map[string]interface{}{ + "caution": cautionThreshold, + "warning": warningThreshold, + "critical": criticalThreshold, + }, + "reported_at": reportedAt, + } + + // Add location if available + if latitude.Valid && longitude.Valid { + alert["location"] = map[string]interface{}{ + "latitude": latitude.Float64, + "longitude": longitude.Float64, + } + } + + results = append(results, alert) + } + + if err := rows.Err(); err != nil { + return fmt.Errorf("error iterating alert rows: %w", err) + } + + return nil + }); err != nil { + return nil, 0, err + } + + return results, totalCount, nil +} + +func 
buildDateRange(startStr, endStr string) (time.Time, time.Time, error) { + const dateLayout = "2006-01-02" + startTime := strings.TrimSpace(startStr) + endTime := strings.TrimSpace(endStr) + + if startTime == "" { + return time.Time{}, time.Time{}, fmt.Errorf("%w", ErrDateRequired) + } + + var start time.Time + var err error + + start, err = time.Parse(time.RFC3339, startTime) + if err != nil { + parsed, pErr := time.ParseInLocation(dateLayout, startTime, time.UTC) + if pErr != nil { + return time.Time{}, time.Time{}, fmt.Errorf("%w", ErrInvalidDateFormat) + } + start = parsed.UTC() + } + + var end time.Time + if endTime == "" { + end = time.Now().UTC() + } else { + end, err = time.Parse(time.RFC3339, endTime) + if err != nil { + parsed, pErr := time.ParseInLocation(dateLayout, endTime, time.UTC) + if pErr != nil { + return time.Time{}, time.Time{}, fmt.Errorf("%w", ErrInvalidDateFormat) + } + end = parsed.UTC().Add(24 * time.Hour) + } + } + + if !start.Before(end) { + return time.Time{}, time.Time{}, fmt.Errorf("start must be before end") + } + return start.UTC(), end.UTC(), nil +} + +const alertsQueryTemplate = ` + SELECT + e.id as entity_id, + e.name as entity_name, + e.device_id, + e.space_slug, + s.state, + s.reported_at, + loc.latitude, + loc.longitude + FROM entities e + INNER JOIN entity_states s ON s.entity_id = e.id + LEFT JOIN LATERAL ( + SELECT + (a.shared_attrs->>'latitude')::float as latitude, + (a.shared_attrs->>'longitude')::float as longitude + FROM entities e2 + INNER JOIN entity_states s2 ON s2.entity_id = e2.id + LEFT JOIN entity_state_attributes a ON a.id = s2.attributes_id + WHERE e2.device_id = e.device_id + AND e2.category = 'location' + AND e2.is_enabled = true + AND a.shared_attrs ? 'latitude' + AND a.shared_attrs ? 
'longitude' + ORDER BY s2.reported_at DESC + LIMIT 1 + ) loc ON true + WHERE %s + ORDER BY s.reported_at DESC + LIMIT $6 OFFSET $7 +` + +const alertsCountQueryTemplate = ` + SELECT COUNT(*) + FROM entities e + INNER JOIN entity_states s ON s.entity_id = e.id + WHERE %s +` diff --git a/internal/timescaledb/attributes.go b/internal/timescaledb/attributes.go new file mode 100644 index 0000000..432c891 --- /dev/null +++ b/internal/timescaledb/attributes.go @@ -0,0 +1,330 @@ +package timescaledb + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "time" + + "github.com/lib/pq" + "github.com/stephenafamo/bob" + "go.uber.org/zap" +) + +// GetLatestAttributesForDeviceAt returns the shared attributes JSON for the +// given device at or before the provided timestamp. If there are no +// attributes available it returns (nil, nil). +func (c *Client) GetLatestAttributesForDeviceAt(ctx context.Context, deviceID string, at time.Time) (map[string]interface{}, error) { + org := orgFromContext(ctx) + + query := `SELECT a.shared_attrs + FROM entities e + JOIN entity_states s ON s.entity_id = e.id + LEFT JOIN entity_state_attributes a ON s.attributes_id = a.id + WHERE e.device_id::text = $1 AND s.reported_at <= $2 AND a.shared_attrs IS NOT NULL + ORDER BY s.reported_at DESC + LIMIT 1` + + var rawAttrs []byte + if org != "" { + if err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + rows, err := tx.QueryContext(txCtx, query, deviceID, at) + if err != nil { + return err + } + defer func() { + _ = rows.Close() + }() + if rows.Next() { + return rows.Scan(&rawAttrs) + } + return nil + }); err != nil { + return nil, fmt.Errorf("failed to query attributes: %w", err) + } + } else { + rows, err := c.DB.QueryContext(ctx, query, deviceID, at) + if err != nil { + return nil, fmt.Errorf("failed to query attributes: %w", err) + } + defer func() { + _ = rows.Close() + }() + if rows.Next() { + if err := rows.Scan(&rawAttrs); err != nil { + return nil, 
err + } + } + } + + if len(rawAttrs) == 0 { + return nil, nil + } + + var attrs map[string]interface{} + if err := json.Unmarshal(rawAttrs, &attrs); err != nil { + return nil, fmt.Errorf("failed to unmarshal attributes JSON: %w", err) + } + + return attrs, nil +} + +type Location struct { + Time time.Time + DeviceID string + SpaceSlug string + Latitude float64 + Longitude float64 + Attributes map[string]interface{} +} + +// GetLocationHistory retrieves location history for a device +func (c *Client) GetLocationHistory(ctx context.Context, deviceID, spaceSlug string, start, end time.Time, limit int) ([]*Location, error) { + org := orgFromContext(ctx) + + if c.Logger != nil { + c.Logger.Info("GetLocationHistory called", + zap.String("org_from_ctx", org), + zap.String("space_slug_param", spaceSlug), + zap.String("device_id", deviceID), + zap.Time("start", start), + zap.Time("end", end), + zap.Int("limit", limit), + ) + } + + log.Printf("GetLocationHistory called - org='%s' space_slug='%s' device_id='%s' start='%s' end='%s' limit=%d", + org, spaceSlug, deviceID, start.String(), end.String(), limit) + + query := `SELECT s.reported_at, e.device_id::text, e.space_slug, a.shared_attrs + FROM entity_states s + JOIN entities e ON s.entity_id = e.id + LEFT JOIN entity_state_attributes a ON s.attributes_id = a.id + WHERE e.device_id::text = $1 AND e.space_slug = $2 + AND e.category = 'location' + AND s.reported_at >= $3 AND s.reported_at <= $4 + AND a.shared_attrs IS NOT NULL + AND a.shared_attrs ? 'latitude' AND a.shared_attrs ? 
'longitude' + ORDER BY s.reported_at ASC + LIMIT $5` + + locations := make([]*Location, 0) + var err error + if org != "" { + err = c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + rows, qerr := tx.QueryContext(txCtx, query, deviceID, spaceSlug, start, end, limit) + if qerr != nil { + return qerr + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var t pq.NullTime + var did sql.NullString + var sslug sql.NullString + var rawAttrs []byte + if err := rows.Scan(&t, &did, &sslug, &rawAttrs); err != nil { + return err + } + attrs := map[string]interface{}(nil) + if len(rawAttrs) > 0 { + var m map[string]interface{} + if jerr := json.Unmarshal(rawAttrs, &m); jerr == nil { + attrs = m + } + } + var lat, lon float64 + if attrs != nil { + if l, ok := attrs["latitude"].(float64); ok { + lat = l + } + if l, ok := attrs["longitude"].(float64); ok { + lon = l + } + } + loc := &Location{ + Time: t.Time, + DeviceID: did.String, + SpaceSlug: sslug.String, + Latitude: lat, + Longitude: lon, + Attributes: attrs, + } + locations = append(locations, loc) + } + return rows.Err() + }) + } else { + rows, err := c.DB.QueryContext(ctx, query, deviceID, spaceSlug, start, end, limit) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var t pq.NullTime + var did sql.NullString + var sslug sql.NullString + var rawAttrs []byte + if err := rows.Scan(&t, &did, &sslug, &rawAttrs); err != nil { + return nil, err + } + attrs := map[string]interface{}(nil) + if len(rawAttrs) > 0 { + var m map[string]interface{} + if jerr := json.Unmarshal(rawAttrs, &m); jerr == nil { + attrs = m + } + } + var lat, lon float64 + if attrs != nil { + if l, ok := attrs["latitude"].(float64); ok { + lat = l + } + if l, ok := attrs["longitude"].(float64); ok { + lon = l + } + } + loc := &Location{ + Time: t.Time, + DeviceID: did.String, + SpaceSlug: sslug.String, + Latitude: lat, + Longitude: lon, + Attributes: attrs, + } + locations = 
append(locations, loc) + } + if err := rows.Err(); err != nil { + return nil, err + } + } + + if err != nil { + return nil, fmt.Errorf("failed to query location history: %w", err) + } + + if c.Logger != nil { + c.Logger.Info("GetLocationHistory result", zap.Int("rows", len(locations)), zap.String("org", org)) + } + log.Printf("GetLocationHistory result - org='%s' rows=%d", org, len(locations)) + + return locations, nil +} + +// GetLastLocation retrieves the most recent location for a device +func (c *Client) GetLastLocation(ctx context.Context, deviceID, spaceSlug string) (*Location, error) { + org := orgFromContext(ctx) + + if c.Logger != nil { + c.Logger.Info("GetLastLocation called", + zap.String("org_from_ctx", org), + zap.String("space_slug_param", spaceSlug), + zap.String("device_id", deviceID), + ) + } + log.Printf("GetLastLocation called - org='%s' space_slug='%s' device_id='%s'", org, spaceSlug, deviceID) + + query := `SELECT s.reported_at, e.device_id::text, e.space_slug, a.shared_attrs + FROM entity_states s + JOIN entities e ON s.entity_id = e.id + LEFT JOIN entity_state_attributes a ON s.attributes_id = a.id + WHERE e.device_id::text = $1 AND e.space_slug = $2 + AND e.category = 'location' + AND a.shared_attrs IS NOT NULL + AND a.shared_attrs ? 'latitude' AND a.shared_attrs ? 
'longitude' + ORDER BY s.reported_at DESC + LIMIT 1` + + var location *Location + var err error + if org != "" { + err = c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + row := tx.QueryRowContext(txCtx, query, deviceID, spaceSlug) + var t pq.NullTime + var did sql.NullString + var sslug sql.NullString + var rawAttrs []byte + if err := row.Scan(&t, &did, &sslug, &rawAttrs); err != nil { + if err == sql.ErrNoRows { + return nil + } + return err + } + attrs := map[string]interface{}(nil) + if len(rawAttrs) > 0 { + var m map[string]interface{} + if jerr := json.Unmarshal(rawAttrs, &m); jerr == nil { + attrs = m + } + } + var lat, lon float64 + if attrs != nil { + if l, ok := attrs["latitude"].(float64); ok { + lat = l + } + if l, ok := attrs["longitude"].(float64); ok { + lon = l + } + } + location = &Location{ + Time: t.Time, + DeviceID: did.String, + SpaceSlug: sslug.String, + Latitude: lat, + Longitude: lon, + Attributes: attrs, + } + return nil + }) + } else { + row := c.DB.QueryRowContext(ctx, query, deviceID, spaceSlug) + var t pq.NullTime + var did sql.NullString + var sslug sql.NullString + var rawAttrs []byte + if err := row.Scan(&t, &did, &sslug, &rawAttrs); err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + attrs := map[string]interface{}(nil) + if len(rawAttrs) > 0 { + var m map[string]interface{} + if jerr := json.Unmarshal(rawAttrs, &m); jerr == nil { + attrs = m + } + } + var lat, lon float64 + if attrs != nil { + if l, ok := attrs["latitude"].(float64); ok { + lat = l + } + if l, ok := attrs["longitude"].(float64); ok { + lon = l + } + } + location = &Location{ + Time: t.Time, + DeviceID: did.String, + SpaceSlug: sslug.String, + Latitude: lat, + Longitude: lon, + Attributes: attrs, + } + } + + if err != nil { + return nil, fmt.Errorf("failed to query last location: %w", err) + } + + if c.Logger != nil { + c.Logger.Info("GetLastLocation result", zap.Bool("found", location != nil), 
zap.String("org", org)) + } + log.Printf("GetLastLocation result - org='%s' found=%t", org, location != nil) + + return location, nil +} diff --git a/internal/timescaledb/client.go b/internal/timescaledb/client.go new file mode 100644 index 0000000..d9cff8e --- /dev/null +++ b/internal/timescaledb/client.go @@ -0,0 +1,60 @@ +package timescaledb + +import ( + "fmt" + "sync" + "time" + + "github.com/stephenafamo/bob" + "go.uber.org/zap" +) + +// Client represents a TimescaleDB client with basic lifecycle helpers. +type Client struct { + DB bob.DB // Exported for subpackages + Logger *zap.Logger // Exported for subpackages + batchSize int + flushInterval time.Duration + connStr string + + wg sync.WaitGroup +} + +// NewClient creates a new TimescaleDB client and verifies connectivity. +func NewClient(connStr string, batchSize int, flushInterval time.Duration, logger *zap.Logger) (*Client, error) { + db, err := bob.Open("postgres", connStr) + if err != nil { + return nil, fmt.Errorf("failed to connect to database: %w", err) + } + + db.SetMaxOpenConns(25) + db.SetMaxIdleConns(5) + db.SetConnMaxLifetime(5 * time.Minute) + + if err := db.Ping(); err != nil { + return nil, fmt.Errorf("failed to ping database: %w", err) + } + + return &Client{ + DB: db, + Logger: logger, + batchSize: batchSize, + flushInterval: flushInterval, + connStr: connStr, + }, nil +} + +// HealthCheck checks if TimescaleDB is reachable. +func (c *Client) HealthCheck() error { + return c.DB.Ping() +} + +// Wait blocks until all background workers have finished. +func (c *Client) Wait() { + c.wg.Wait() +} + +// Close closes the database connection. 
+func (c *Client) Close() error { + return c.DB.Close() +} diff --git a/internal/timescaledb/context.go b/internal/timescaledb/context.go new file mode 100644 index 0000000..99c23c5 --- /dev/null +++ b/internal/timescaledb/context.go @@ -0,0 +1,42 @@ +package timescaledb + +import ( + "context" + "strings" +) + +// context key type for organization +type ctxKeyOrg struct{} + +// contextWithOrg returns a new context that carries the organization slug +func contextWithOrg(ctx context.Context, org string) context.Context { + return context.WithValue(ctx, ctxKeyOrg{}, org) +} + +// ContextWithOrg is exported helper to attach organization slug to context +func ContextWithOrg(ctx context.Context, org string) context.Context { + return contextWithOrg(ctx, org) +} + +// orgFromContext extracts the organization slug from context if present +func orgFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v := ctx.Value(ctxKeyOrg{}); v != nil { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +// OrgFromContext is the exported version of orgFromContext +func OrgFromContext(ctx context.Context) string { + return orgFromContext(ctx) +} + +// pqQuoteIdentifier quotes an identifier for Postgres (very small helper) +func pqQuoteIdentifier(s string) string { + return `"` + strings.ReplaceAll(s, `"`, `""`) + `"` +} diff --git a/internal/timescaledb/device_properties.go b/internal/timescaledb/device_properties.go new file mode 100644 index 0000000..1340302 --- /dev/null +++ b/internal/timescaledb/device_properties.go @@ -0,0 +1,91 @@ +package timescaledb + +import ( + "context" + "fmt" + + "github.com/stephenafamo/bob" + "go.uber.org/zap" +) + +// GetDeviceProperties retrieves all latest properties for a device +func (c *Client) GetDeviceProperties(ctx context.Context, deviceID, spaceSlug string) (map[string]interface{}, error) { + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + 
+ props := make(map[string]interface{}) + + // Get last location + location, err := c.GetLastLocation(ctx, deviceID, spaceSlug) + if err == nil && location != nil { + props["latest_checkpoint"] = map[string]interface{}{ + "timestamp": location.Time, + "latitude": location.Latitude, + "longitude": location.Longitude, + } + } + + // Get latest values for all entity categories associated with this device + err = c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + rows, err := tx.QueryContext(txCtx, ` + SELECT DISTINCT e.category + FROM entities e + WHERE e.device_id::text = $1 AND e.space_slug = $2 AND e.category != 'location' + ORDER BY e.category + `, deviceID, spaceSlug) + if err != nil { + return err + } + defer func() { _ = rows.Close() }() + + var entityCategories []string + for rows.Next() { + var category string + if err := rows.Scan(&category); err != nil { + return err + } + entityCategories = append(entityCategories, category) + } + if err := rows.Err(); err != nil { + return err + } + + // Then get latest value for each category + for _, category := range entityCategories { + row := tx.QueryRowContext(txCtx, ` + SELECT COALESCE(es.state::float8, 0) + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + WHERE e.device_id::text = $1 AND e.space_slug = $2 AND e.category = $3 + ORDER BY es.reported_at DESC + LIMIT 1 + `, deviceID, spaceSlug, category) + + var value float64 + if err := row.Scan(&value); err != nil { + if err.Error() != "sql: no rows in result set" { + c.Logger.Warn("Failed to query entity value", + zap.Error(err), + zap.String("category", category), + zap.String("device_id", deviceID), + ) + } + continue + } + props[category] = value + } + return nil + }) + + if err != nil { + c.Logger.Error("Failed to query device properties", + zap.Error(err), + zap.String("device_id", deviceID), + ) + return props, nil + } + + return props, nil +} diff --git a/internal/timescaledb/entities.go b/internal/timescaledb/entities.go 
new file mode 100644 index 0000000..c117a1e --- /dev/null +++ b/internal/timescaledb/entities.go @@ -0,0 +1,182 @@ +package timescaledb + +import ( + "context" + "database/sql" + "fmt" + + "github.com/lib/pq" + "github.com/stephenafamo/bob" +) + +// GetEntities returns entities for a given space with optional filters and pagination. +func (c *Client) GetEntities(ctx context.Context, spaceSlug, category, deviceID string, displayTypes []string, search string, page, pageSize int) ([]map[string]interface{}, int, error) { + org := orgFromContext(ctx) + + if page < 1 { + page = 1 + } + if pageSize <= 0 { + pageSize = 100 + } + offset := (page - 1) * pageSize + + // Build WHERE clauses + args := []interface{}{spaceSlug} + where := "e.space_slug = $1" + idx := 2 + if category != "" { + where += fmt.Sprintf(" AND e.category = $%d", idx) + args = append(args, category) + idx++ + } + if deviceID != "" { + where += fmt.Sprintf(" AND e.device_id = $%d", idx) + args = append(args, deviceID) + idx++ + } + if len(displayTypes) > 0 { + where += fmt.Sprintf(" AND e.display_type::text[] && $%d::text[]", idx) + args = append(args, pq.Array(displayTypes)) + idx++ + } + if search != "" { + searchPattern := "%" + search + "%" + where += fmt.Sprintf(" AND (e.name ILIKE $%[1]d OR e.unique_key ILIKE $%[1]d OR e.category ILIKE $%[1]d OR e.device_id::text ILIKE $%[1]d OR et.name ILIKE $%[1]d OR et.unique_key ILIKE $%[1]d)", idx) + args = append(args, searchPattern) + idx++ + } + + // Count query + countQuery := fmt.Sprintf("SELECT COUNT(1) FROM entities e LEFT JOIN entity_types et ON e.entity_type_id = et.id WHERE %s", where) + var total int + if org != "" { + if err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + row := tx.QueryRowContext(txCtx, countQuery, args...) + return row.Scan(&total) + }); err != nil { + return nil, 0, fmt.Errorf("failed to count entities: %w", err) + } + } else { + row := c.DB.QueryRowContext(ctx, countQuery, args...) 
+ if err := row.Scan(&total); err != nil { + return nil, 0, fmt.Errorf("failed to count entities: %w", err) + } + } + + // Select query + selectQuery := fmt.Sprintf(`SELECT e.id, e.device_id, e.name, e.unique_key, et.id AS entity_type_id, et.name AS entity_type_name, et.unique_key AS entity_type_unique_key, et.image_url AS entity_type_image_url, e.category, e.unit_of_measurement, e.display_type, e.image_url, e.is_enabled, e.created_at, e.updated_at, s.time_start, s.time_end + FROM entities e + LEFT JOIN entity_types et ON e.entity_type_id = et.id + LEFT JOIN ( + SELECT entity_id, MIN(reported_at) AS time_start, MAX(reported_at) AS time_end FROM entity_states GROUP BY entity_id + ) s ON s.entity_id = e.id + WHERE %s + ORDER BY e.created_at DESC + LIMIT $%d OFFSET $%d`, where, idx, idx+1) + + args = append(args, pageSize, offset) + + // Run query + var results []map[string]interface{} + if org != "" { + if err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + rows, err := tx.QueryContext(txCtx, selectQuery, args...) 
+ if err != nil { + return err + } + defer func() { + _ = rows.Close() + }() + + for rows.Next() { + var id, deviceIDCol, name, uniqueKey sql.NullString + var etID, etName, etUnique, etImage sql.NullString + var categoryCol, unit, imageURL sql.NullString + var displayType pq.StringArray + var isEnabled bool + var createdAt, updatedAt pq.NullTime + var timeStart, timeEnd pq.NullTime + + if err := rows.Scan(&id, &deviceIDCol, &name, &uniqueKey, &etID, &etName, &etUnique, &etImage, &categoryCol, &unit, &displayType, &imageURL, &isEnabled, &createdAt, &updatedAt, &timeStart, &timeEnd); err != nil { + return err + } + + rowMap := map[string]interface{}{ + "id": id.String, + "device_id": deviceIDCol.String, + "device_name": name.String, + "unique_key": uniqueKey.String, + "entity_type": map[string]interface{}{ + "id": etID.String, + "name": etName.String, + "unique_key": etUnique.String, + "image_url": etImage.String, + }, + "name": name.String, + "category": categoryCol.String, + "unit_of_measurement": unit.String, + "display_type": []string(displayType), + "image_url": imageURL.String, + "is_enabled": isEnabled, + "created_at": createdAt.Time, + "updated_at": updatedAt.Time, + "time_start": timeStart.Time, + "time_end": timeEnd.Time, + } + results = append(results, rowMap) + } + return nil + }); err != nil { + return nil, 0, err + } + } else { + rows, err := c.DB.QueryContext(ctx, selectQuery, args...) 
+ if err != nil { + return nil, 0, fmt.Errorf("failed to query entities: %w", err) + } + defer func() { + _ = rows.Close() + }() + + for rows.Next() { + var id, deviceIDCol, name, uniqueKey sql.NullString + var etID, etName, etUnique, etImage sql.NullString + var categoryCol, unit, imageURL sql.NullString + var displayType pq.StringArray + var isEnabled bool + var createdAt, updatedAt pq.NullTime + var timeStart, timeEnd pq.NullTime + + if err := rows.Scan(&id, &deviceIDCol, &name, &uniqueKey, &etID, &etName, &etUnique, &etImage, &categoryCol, &unit, &displayType, &imageURL, &isEnabled, &createdAt, &updatedAt, &timeStart, &timeEnd); err != nil { + return nil, 0, err + } + + rowMap := map[string]interface{}{ + "id": id.String, + "device_id": deviceIDCol.String, + "device_name": name.String, + "unique_key": uniqueKey.String, + "entity_type": map[string]interface{}{ + "id": etID.String, + "name": etName.String, + "unique_key": etUnique.String, + "image_url": etImage.String, + }, + "name": name.String, + "category": categoryCol.String, + "unit_of_measurement": unit.String, + "display_type": []string(displayType), + "image_url": imageURL.String, + "is_enabled": isEnabled, + "created_at": createdAt.Time, + "updated_at": updatedAt.Time, + "time_start": timeStart.Time, + "time_end": timeEnd.Time, + } + results = append(results, rowMap) + } + } + + return results, total, nil +} diff --git a/internal/timescaledb/errors.go b/internal/timescaledb/errors.go new file mode 100644 index 0000000..6e77c3c --- /dev/null +++ b/internal/timescaledb/errors.go @@ -0,0 +1,26 @@ +package timescaledb + +import ( + "errors" + "fmt" + "time" +) + +const ( + BatchChannelBufferSize = 10 + DropTimeout = 1 * time.Second +) + +var ( + ErrLocationDroppedTimeout = fmt.Errorf("location dropped due to timeout") + ErrDateRequired = errors.New("date is required") + ErrInvalidDateFormat = errors.New("invalid date format, expected YYYY-MM-DD") +) + +type ErrDroppedBatch struct { + Size int +} + +func (e 
*ErrDroppedBatch) Error() string { + return fmt.Sprintf("batch dropped due to timeout, size: %d", e.Size) +} diff --git a/internal/timescaledb/schema.go b/internal/timescaledb/schema.go new file mode 100644 index 0000000..87f8974 --- /dev/null +++ b/internal/timescaledb/schema.go @@ -0,0 +1,75 @@ +package timescaledb + +import ( + "context" + "fmt" + "net/url" + "strings" + + dbpkg "github.com/Space-DF/telemetry-service/pkgs/db" + "go.uber.org/zap" +) + +// CreateSchema creates a PostgreSQL schema for the given organization if it doesn't exist. +func (c *Client) CreateSchema(ctx context.Context, orgSlug string) error { + if orgSlug == "" { + return fmt.Errorf("empty organization slug") + } + + escaped := strings.ReplaceAll(orgSlug, `"`, `""`) + query := fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS "%s"`, escaped) + + if _, err := c.DB.ExecContext(ctx, query); err != nil { + return fmt.Errorf("failed to create schema '%s': %w", orgSlug, err) + } + + c.Logger.Info("Ensured database schema for organization", zap.String("org", orgSlug)) + return nil +} + +// CreateSchemaAndTables ensures the schema exists and creates required tables +// for telemetry within that schema: device_locations and schema_migrations. 
+func (c *Client) CreateSchemaAndTables(ctx context.Context, orgSlug string) error { + if err := c.CreateSchema(ctx, orgSlug); err != nil { + return err + } + + if c.connStr == "" { + return fmt.Errorf("no connection string available to run migrations") + } + + parsed, err := url.Parse(c.connStr) + if err != nil { + return fmt.Errorf("failed to parse connection string for migrations: %w", err) + } + + q := parsed.Query() + q.Set("options", fmt.Sprintf("-c search_path=%s,public", orgSlug)) + parsed.RawQuery = q.Encode() + + migrationPath := "pkgs/db/migrations" + + if err := dbpkg.Migrate(parsed, migrationPath); err != nil { + return fmt.Errorf("failed to run migrations for schema '%s': %w", orgSlug, err) + } + + c.Logger.Info("Ran migrations for organization schema", zap.String("org", orgSlug)) + return nil +} + +// DropSchema drops a PostgreSQL schema for the given organization. +func (c *Client) DropSchema(ctx context.Context, orgSlug string) error { + if orgSlug == "" { + return fmt.Errorf("empty organization slug") + } + + escaped := strings.ReplaceAll(orgSlug, `"`, `""`) + query := fmt.Sprintf(`DROP SCHEMA IF EXISTS "%s" CASCADE`, escaped) + + if _, err := c.DB.ExecContext(ctx, query); err != nil { + return fmt.Errorf("failed to drop schema '%s': %w", orgSlug, err) + } + + c.Logger.Info("Dropped database schema for organization", zap.String("org", orgSlug)) + return nil +} diff --git a/internal/timescaledb/telemetry.go b/internal/timescaledb/telemetry.go new file mode 100644 index 0000000..8c85a54 --- /dev/null +++ b/internal/timescaledb/telemetry.go @@ -0,0 +1,209 @@ +package timescaledb + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "hash/crc32" + "log" + "time" + + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/google/uuid" + "github.com/lib/pq" + "github.com/stephenafamo/bob" +) + +func (c *Client) SaveTelemetryPayload(ctx context.Context, payload *models.TelemetryPayload) error { + if payload == nil { + return 
fmt.Errorf("nil telemetry payload") + } + + org := payload.Organization + if org == "" { + return fmt.Errorf("missing organization in telemetry payload") + } + + log.Printf("[Telemetry] SaveTelemetryPayload: org=%s, device_id=%s, entities=%d", org, payload.DeviceID, len(payload.Entities)) + return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + for _, ent := range payload.Entities { + if err := c.upsertTelemetryEntity(txCtx, tx, &ent, payload); err != nil { + log.Printf("[Telemetry] ERROR upserting entity: %v", err) + return err + } + log.Printf("[Telemetry] Entity upserted: org=%s, device_id=%s, entity_id=%s", org, payload.DeviceID, ent.UniqueID) + } + log.Printf("[Telemetry] Successfully saved payload: org=%s, device_id=%s", org, payload.DeviceID) + return nil + }) +} + +func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *models.TelemetryEntity, payload *models.TelemetryPayload) error { + if ent == nil { + return fmt.Errorf("nil telemetry entity") + } + + displayType := ent.DisplayType + if len(displayType) == 0 { + displayType = []string{"unknown"} + } + + // Ensure entity type exists (unique by unique_key). + entityTypeKey := ent.EntityType + if entityTypeKey == "" { + entityTypeKey = "unknown" + } + + var entityTypeID uuid.UUID + if err := tx.QueryRowContext(ctx, ` + INSERT INTO entity_types (id, name, unique_key, created_at, updated_at) + VALUES ($1, $2, $3, now(), now()) + ON CONFLICT (unique_key) DO UPDATE SET name = EXCLUDED.name, updated_at = now() + RETURNING id`, + uuid.New(), + ent.EntityType, + entityTypeKey, + ).Scan(&entityTypeID); err != nil { + return fmt.Errorf("upsert entity_type '%s': %w", entityTypeKey, err) + } + + // Prepare optional device_id. + var deviceUUID *uuid.UUID + if payload.DeviceID != "" { + if parsed, err := uuid.Parse(payload.DeviceID); err == nil { + deviceUUID = &parsed + } + } + + // Upsert entity row. 
+ var entityID uuid.UUID + if err := tx.QueryRowContext(ctx, ` + INSERT INTO entities ( + id, space_slug, device_id, unique_key, category, entity_type_id, + name, unit_of_measurement, display_type, is_enabled, created_at, updated_at + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, true, now(), now()) + ON CONFLICT (unique_key) DO UPDATE SET + space_slug = EXCLUDED.space_slug, + device_id = EXCLUDED.device_id, + name = EXCLUDED.name, + unit_of_measurement = EXCLUDED.unit_of_measurement, + category = EXCLUDED.category, + entity_type_id = EXCLUDED.entity_type_id, + display_type = EXCLUDED.display_type, + updated_at = now() + RETURNING id`, + uuid.New(), + payload.SpaceSlug, + deviceUUID, + ent.UniqueID, + ent.EntityType, + entityTypeID, + ent.Name, + ent.UnitOfMeas, + pq.Array(displayType), + ).Scan(&entityID); err != nil { + return fmt.Errorf("upsert entity '%s': %w", ent.UniqueID, err) + } + + // Handle attributes: deduplicate by hash to reuse existing row. + var attrsID sql.NullString + if len(ent.Attributes) > 0 { + rawAttrs, err := json.Marshal(ent.Attributes) + if err != nil { + return fmt.Errorf("marshal attributes for '%s': %w", ent.UniqueID, err) + } + + hash := int64(crc32.ChecksumIEEE(rawAttrs)) + if err := tx.QueryRowContext(ctx, ` + INSERT INTO entity_state_attributes (id, hash, shared_attrs) + VALUES ($1, $2, $3) + ON CONFLICT (hash) DO UPDATE SET shared_attrs = EXCLUDED.shared_attrs + RETURNING id`, + uuid.New(), + hash, + rawAttrs, + ).Scan(&attrsID); err != nil { + return fmt.Errorf("upsert attributes for '%s': %w", ent.UniqueID, err) + } + } + + // Parse timestamps. 
+ reportedAt := parseRFC3339(ent.Timestamp) + if reportedAt.IsZero() { + reportedAt = parseRFC3339(payload.Timestamp) + } + if reportedAt.IsZero() { + reportedAt = time.Now().UTC() + } + + stateStr := fmt.Sprint(ent.State) + + // Get the most recent state for this entity to check if it changed + var lastStateID sql.NullString + var lastState sql.NullString + var lastChangedAt time.Time + var lastReportedAt time.Time + err := tx.QueryRowContext(ctx, ` + SELECT id, state, last_changed_at, reported_at + FROM entity_states + WHERE entity_id = $1 AND reported_at < $2 + ORDER BY reported_at DESC + LIMIT 1`, + entityID, + reportedAt, + ).Scan(&lastStateID, &lastState, &lastChangedAt, &lastReportedAt) + + // Determine last_changed_at: if state value changed, use reportedAt; otherwise keep old timestamp + changedAt := reportedAt + if err == nil && lastState.Valid && lastState.String == stateStr { + // State hasn't changed, preserve the last_changed_at timestamp + changedAt = lastChangedAt + } + + // Prepare old_state_id for linking + var oldStateUUID *uuid.UUID + if lastStateID.Valid && lastStateID.String != "" { + if parsed, err := uuid.Parse(lastStateID.String); err == nil { + oldStateUUID = &parsed + } + } + + stateID := uuid.New() + _, err = tx.ExecContext(ctx, ` + INSERT INTO entity_states ( + id, entity_id, state, attributes_id, old_state_id, reported_at, last_changed_at + ) VALUES ($1, $2, $3, $4, $5, $6, $7)`, + stateID, + entityID, + stateStr, + nullUUID(attrsID), + oldStateUUID, + reportedAt, + changedAt, + ) + if err != nil { + return fmt.Errorf("insert entity_state for '%s': %w", ent.UniqueID, err) + } + + return nil +} + +func parseRFC3339(ts string) time.Time { + if ts == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC3339, ts) + if err != nil { + return time.Time{} + } + return t.UTC() +} + +func nullUUID(id sql.NullString) any { + if id.Valid && id.String != "" { + return id.String + } + return nil +} diff --git a/internal/timescaledb/tx.go 
b/internal/timescaledb/tx.go new file mode 100644 index 0000000..8d885d9 --- /dev/null +++ b/internal/timescaledb/tx.go @@ -0,0 +1,38 @@ +package timescaledb + +import ( + "context" + "fmt" + "log" + + "github.com/stephenafamo/bob" +) + +// WithOrgTx begins a transaction, sets the search_path for the provided org, +// runs the provided function passing the transaction as the db handle, and commits the transaction. +// Exported for use by subpackages. +func (c *Client) WithOrgTx(ctx context.Context, org string, fn func(ctx context.Context, tx bob.Tx) error) error { + tx, err := c.DB.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + defer func() { + _ = tx.Rollback(ctx) + }() + + setSQL := fmt.Sprintf("SET LOCAL search_path = %s,public", pqQuoteIdentifier(org)) + log.Printf("Executing SQL for transaction: %s", setSQL) + if _, err := tx.ExecContext(ctx, setSQL); err != nil { + return fmt.Errorf("failed to set search_path for org '%s': %w", org, err) + } + + if err := fn(ctx, tx); err != nil { + return err + } + + if err := tx.Commit(ctx); err != nil { + return fmt.Errorf("failed to commit transaction for org '%s': %w", org, err) + } + + return nil +} diff --git a/internal/timescaledb/widget_data.go b/internal/timescaledb/widget_data.go new file mode 100644 index 0000000..c95bdc2 --- /dev/null +++ b/internal/timescaledb/widget_data.go @@ -0,0 +1,404 @@ +package timescaledb + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/stephenafamo/bob" +) + +// EntityDataPoint represents a data point for aggregation +type EntityDataPoint struct { + Timestamp time.Time + Value float64 +} + +// HistogramBucketData represents histogram bucket data +type HistogramBucketData struct { + Bucket string + Count int64 + Value float64 +} + +// TableDataRow represents a row in table data +type TableDataRow struct { + Timestamp time.Time + Values map[string]interface{} +} + +// GetLatestEntityValue gets the latest numeric value 
for an entity +func (c *Client) GetLatestEntityValue(ctx context.Context, entityID string) (float64, string, error) { + var value float64 + var unitOfMeasurement string + + org := orgFromContext(ctx) + if org == "" { + return 0.0, "", fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + row := tx.QueryRowContext(txCtx, ` + SELECT COALESCE(es.state::float8, 0), COALESCE(e.unit_of_measurement, '') + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + WHERE e.unique_key = $1 + ORDER BY es.reported_at DESC + LIMIT 1 + `, entityID) + + if err := row.Scan(&value, &unitOfMeasurement); err != nil { + if err.Error() == "sql: no rows in result set" { + return nil + } + return fmt.Errorf("scan row: %w", err) + } + + return nil + }) + + if err != nil { + return 0.0, "", err + } + + return value, unitOfMeasurement, nil +} + +// GetLatestEntityBoolValue gets the latest boolean value for an entity +func (c *Client) GetLatestEntityBoolValue(ctx context.Context, entityID string) (bool, error) { + var state string + + org := orgFromContext(ctx) + if org == "" { + return false, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + row := tx.QueryRowContext(txCtx, ` + SELECT es.state + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + WHERE e.unique_key = $1 + ORDER BY es.reported_at DESC + LIMIT 1 + `, entityID) + + if err := row.Scan(&state); err != nil { + if err.Error() == "sql: no rows in result set" { + return nil + } + return fmt.Errorf("scan row: %w", err) + } + + return nil + }) + + if err != nil { + return false, err + } + + // Parse state as boolean + return state == "true" || state == "on" || state == "1", nil +} + +// GetAggregatedEntityData gets all raw data points for a time range (not aggregated) +func (c *Client) GetAggregatedEntityData( + ctx context.Context, + entityID string, + 
startTime, endTime time.Time, +) ([]EntityDataPoint, error) { + var dataPoints []EntityDataPoint + + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + query := ` + SELECT + es.reported_at, + COALESCE(es.state::float8, 0) as value + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + WHERE e.unique_key = $1 + AND es.reported_at BETWEEN $2 AND $3 + ORDER BY es.reported_at ASC + LIMIT 10000 + ` + + rows, err := tx.QueryContext(txCtx, query, entityID, startTime, endTime) + if err != nil { + return fmt.Errorf("query entity data: %w", err) + } + defer func() { + if err := rows.Close(); err != nil { + log.Printf("error closing rows: %v", err) + } + }() + + for rows.Next() { + var timestamp time.Time + var value float64 + + if err := rows.Scan(×tamp, &value); err != nil { + return fmt.Errorf("scan entity row: %w", err) + } + + dataPoints = append(dataPoints, EntityDataPoint{ + Timestamp: timestamp, + Value: value, + }) + } + + if err = rows.Err(); err != nil { + return fmt.Errorf("row iteration error: %w", err) + } + + return nil + }) + + if err != nil { + return nil, err + } + + return dataPoints, nil +} + +// GetHistogramData gets histogram bucket data for an entity +func (c *Client) GetHistogramData( + ctx context.Context, + entityID string, + startTime, endTime time.Time, +) ([]HistogramBucketData, error) { + var values []float64 + + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + + // Get all values in range + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + query := ` + SELECT COALESCE(es.state::float8, 0) + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + WHERE e.unique_key = $1 + AND es.reported_at BETWEEN $2 AND $3 + ORDER BY es.state::float8 + ` + + rows, err := tx.QueryContext(txCtx, query, entityID, 
startTime, endTime) + if err != nil { + return fmt.Errorf("query histogram data: %w", err) + } + defer func() { + if err := rows.Close(); err != nil { + log.Printf("error closing rows: %v", err) + } + }() + + for rows.Next() { + var value float64 + if err := rows.Scan(&value); err != nil { + return fmt.Errorf("scan histogram value: %w", err) + } + values = append(values, value) + } + + if err = rows.Err(); err != nil { + return fmt.Errorf("row iteration error: %w", err) + } + + return nil + }) + + if err != nil { + return nil, err + } + + if len(values) == 0 { + return []HistogramBucketData{}, nil + } + + // Calculate min, max and bucket size + minVal := values[0] + maxVal := values[0] + for _, v := range values { + if v < minVal { + minVal = v + } + if v > maxVal { + maxVal = v + } + } + + // Create 5 buckets + numBuckets := 5 + bucketSize := (maxVal - minVal) / float64(numBuckets) + if bucketSize == 0 { + bucketSize = 1 + } + + // Initialize buckets + buckets := make([]HistogramBucketData, numBuckets) + bucketCounts := make([]int64, numBuckets) + + for i := 0; i < numBuckets; i++ { + bucketStart := minVal + float64(i)*bucketSize + bucketEnd := minVal + float64(i+1)*bucketSize + buckets[i] = HistogramBucketData{ + Bucket: fmt.Sprintf("%.1f-%.1f", bucketStart, bucketEnd), + Count: 0, + Value: (bucketStart + bucketEnd) / 2, + } + } + + // Count values in each bucket + for _, v := range values { + bucketIndex := int((v - minVal) / bucketSize) + if bucketIndex >= numBuckets { + bucketIndex = numBuckets - 1 + } + bucketCounts[bucketIndex]++ + } + + for i := 0; i < numBuckets; i++ { + buckets[i].Count = bucketCounts[i] + } + + return buckets, nil +} + +// GetTableData gets raw entity data for table display +func (c *Client) GetTableData( + ctx context.Context, + entityID string, + startTime, endTime time.Time, +) ([]TableDataRow, []string, error) { + var tableRows []TableDataRow + columns := []string{"timestamp", "state"} + + org := orgFromContext(ctx) + if org == "" { 
+ return nil, nil, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + query := ` + SELECT + es.reported_at, + es.state, + es.attributes_id, + COALESCE(esa.shared_attrs, '{}'::jsonb) as attributes + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + LEFT JOIN entity_state_attributes esa ON es.attributes_id = esa.id + WHERE e.unique_key = $1 + AND es.reported_at BETWEEN $2 AND $3 + ORDER BY es.reported_at DESC + LIMIT 1000 + ` + + rows, err := tx.QueryContext(txCtx, query, entityID, startTime, endTime) + if err != nil { + return fmt.Errorf("query table data: %w", err) + } + defer func() { + if err := rows.Close(); err != nil { + log.Printf("error closing rows: %v", err) + } + }() + + for rows.Next() { + var timestamp time.Time + var state string + var attributesID interface{} + var attributes string + + if err := rows.Scan(×tamp, &state, &attributesID, &attributes); err != nil { + return fmt.Errorf("scan table row: %w", err) + } + + values := map[string]interface{}{ + "timestamp": timestamp, + "state": state, + } + + // Parse attributes if present + if attributes != "" && attributes != "{}" { + values["attributes"] = attributes + // Add to columns if not already there + hasAttr := false + for _, col := range columns { + if col == "attributes" { + hasAttr = true + break + } + } + if !hasAttr { + columns = append(columns, "attributes") + } + } + + tableRows = append(tableRows, TableDataRow{ + Timestamp: timestamp, + Values: values, + }) + } + + if err = rows.Err(); err != nil { + return fmt.Errorf("row iteration error: %w", err) + } + + return nil + }) + + if err != nil { + return nil, nil, err + } + + return tableRows, columns, nil +} + +// GetLatestEntityLocation gets the latest latitude and longitude for an entity +func (c *Client) GetLatestEntityLocation(ctx context.Context, entityID string) (float64, float64, error) { + var latitude float64 + var longitude float64 + + 
org := orgFromContext(ctx) + if org == "" { + return 0.0, 0.0, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + row := tx.QueryRowContext(txCtx, ` + SELECT + COALESCE((esa.shared_attrs->>'latitude')::float8, 0), + COALESCE((esa.shared_attrs->>'longitude')::float8, 0) + FROM entity_states es + JOIN entities e ON es.entity_id = e.id + LEFT JOIN entity_state_attributes esa ON es.attributes_id = esa.id + WHERE e.unique_key = $1 + ORDER BY es.reported_at DESC + LIMIT 1 + `, entityID) + + if err := row.Scan(&latitude, &longitude); err != nil { + if err.Error() == "sql: no rows in result set" { + return nil + } + return fmt.Errorf("scan location row: %w", err) + } + + return nil + }) + + if err != nil { + return 0.0, 0.0, err + } + + return latitude, longitude, nil +} diff --git a/pkgs/db/dberrors/bob_errors.bob.go b/pkgs/db/dberrors/bob_errors.bob.go new file mode 100644 index 0000000..9195df5 --- /dev/null +++ b/pkgs/db/dberrors/bob_errors.bob.go @@ -0,0 +1,32 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dberrors + +import "github.com/lib/pq" + +// ErrUniqueConstraint captures all unique constraint errors by explicitly leaving `s` empty. +var ErrUniqueConstraint = &UniqueConstraintError{s: ""} + +type UniqueConstraintError struct { + // schema is the schema where the unique constraint is defined. + schema string + // table is the name of the table where the unique constraint is defined. + table string + // columns are the columns constituting the unique constraint. + columns []string + // s is a string uniquely identifying the constraint in the raw error message returned from the database. 
+ s string +} + +func (e *UniqueConstraintError) Error() string { + return e.s +} + +func (e *UniqueConstraintError) Is(target error) bool { + err, ok := target.(*pq.Error) + if !ok { + return false + } + return err.Code == "23505" && (e.s == "" || err.Constraint == e.s) +} diff --git a/pkgs/db/dberrors/bob_main.bob_test.go b/pkgs/db/dberrors/bob_main.bob_test.go new file mode 100644 index 0000000..b9da2c5 --- /dev/null +++ b/pkgs/db/dberrors/bob_main.bob_test.go @@ -0,0 +1,9 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dberrors + +import "github.com/stephenafamo/bob" + +// Set the testDB to enable tests that use the database +var testDB bob.Transactor[bob.Tx] diff --git a/pkgs/db/dberrors/device_locations.bob.go b/pkgs/db/dberrors/device_locations.bob.go new file mode 100644 index 0000000..d89d648 --- /dev/null +++ b/pkgs/db/dberrors/device_locations.bob.go @@ -0,0 +1,17 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dberrors + +var DeviceLocationErrors = &deviceLocationErrors{ + ErrUniqueDeviceLocationsPk: &UniqueConstraintError{ + schema: "", + table: "device_locations", + columns: []string{"time", "device_id"}, + s: "device_locations_pk", + }, +} + +type deviceLocationErrors struct { + ErrUniqueDeviceLocationsPk *UniqueConstraintError +} diff --git a/pkgs/db/dberrors/schema_migrations.bob.go b/pkgs/db/dberrors/schema_migrations.bob.go new file mode 100644 index 0000000..f217d9f --- /dev/null +++ b/pkgs/db/dberrors/schema_migrations.bob.go @@ -0,0 +1,17 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package dberrors + +var SchemaMigrationErrors = &schemaMigrationErrors{ + ErrUniqueSchemaMigrationsPkey: &UniqueConstraintError{ + schema: "", + table: "schema_migrations", + columns: []string{"version"}, + s: "schema_migrations_pkey", + }, +} + +type schemaMigrationErrors struct { + ErrUniqueSchemaMigrationsPkey *UniqueConstraintError +} diff --git a/pkgs/db/dbinfo/bob_types.bob.go b/pkgs/db/dbinfo/bob_types.bob.go new file mode 100644 index 0000000..138299c --- /dev/null +++ b/pkgs/db/dbinfo/bob_types.bob.go @@ -0,0 +1,83 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dbinfo + +import "github.com/aarondl/opt/null" + +type Table[Cols columns, Idxs indexes, FKs foreignKeys, U uniques, C checks] struct { + Schema string + Name string + Columns Cols + Indexes Idxs + PrimaryKey *constraint + ForeignKeys FKs + Uniques U + Checks C + Comment string +} + +type columns interface { + AsSlice() []column +} + +type column struct { + Name string + DBType string + Default string + Comment string + Nullable bool + Generated bool + AutoIncr bool +} + +type indexes interface { + AsSlice() []index +} + +type index struct { + Type string + Name string + Columns []indexColumn + Unique bool + Comment string + NullsFirst []bool + NullsDistinct bool + Where string + Include []string +} + +type indexColumn struct { + Name string + Desc null.Val[bool] + IsExpression bool +} + +type constraint struct { + Name string + Columns []string + Comment string +} + +type foreignKeys interface { + AsSlice() []foreignKey +} + +type foreignKey struct { + constraint + ForeignTable string + ForeignColumns []string +} + +type uniques interface { + AsSlice() []constraint +} + +type checks interface { + AsSlice() []check +} + +type check struct { + constraint + Expression string +} diff --git a/pkgs/db/dbinfo/device_locations.bob.go b/pkgs/db/dbinfo/device_locations.bob.go new file mode 100644 index 
0000000..19c33d2 --- /dev/null +++ b/pkgs/db/dbinfo/device_locations.bob.go @@ -0,0 +1,193 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dbinfo + +import "github.com/aarondl/opt/null" + +var DeviceLocations = Table[ + deviceLocationColumns, + deviceLocationIndexes, + deviceLocationForeignKeys, + deviceLocationUniques, + deviceLocationChecks, +]{ + Schema: "", + Name: "device_locations", + Columns: deviceLocationColumns{ + Time: column{ + Name: "time", + DBType: "timestamp with time zone", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + DeviceID: column{ + Name: "device_id", + DBType: "character varying", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + SpaceSlug: column{ + Name: "space_slug", + DBType: "character varying", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + Latitude: column{ + Name: "latitude", + DBType: "double precision", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + Longitude: column{ + Name: "longitude", + DBType: "double precision", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + Accuracy: column{ + Name: "accuracy", + DBType: "double precision", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + }, + Indexes: deviceLocationIndexes{ + DeviceLocationsPK: index{ + Type: "btree", + Name: "device_locations_pk", + Columns: []indexColumn{ + { + Name: "\"time\"", + Desc: null.FromCond(false, true), + IsExpression: true, + }, + { + Name: "device_id", + Desc: null.FromCond(false, true), + IsExpression: false, + }, + }, + Unique: true, + Comment: "", + NullsFirst: []bool{false, false}, + NullsDistinct: false, + Where: "", + Include: []string{}, + }, + DeviceLocationsTimeIdx: index{ 
+ Type: "btree", + Name: "device_locations_time_idx", + Columns: []indexColumn{ + { + Name: "\"time\"", + Desc: null.FromCond(true, true), + IsExpression: true, + }, + }, + Unique: false, + Comment: "", + NullsFirst: []bool{true}, + NullsDistinct: false, + Where: "", + Include: []string{}, + }, + IdxDeviceLocationsOrgDeviceTime: index{ + Type: "btree", + Name: "idx_device_locations_org_device_time", + Columns: []indexColumn{ + { + Name: "space_slug", + Desc: null.FromCond(false, true), + IsExpression: false, + }, + { + Name: "device_id", + Desc: null.FromCond(false, true), + IsExpression: false, + }, + { + Name: "\"time\"", + Desc: null.FromCond(true, true), + IsExpression: true, + }, + }, + Unique: false, + Comment: "", + NullsFirst: []bool{false, false, true}, + NullsDistinct: false, + Where: "", + Include: []string{}, + }, + }, + PrimaryKey: &constraint{ + Name: "device_locations_pk", + Columns: []string{"time", "device_id"}, + Comment: "", + }, + + Comment: "", +} + +type deviceLocationColumns struct { + Time column + DeviceID column + SpaceSlug column + Latitude column + Longitude column + Accuracy column +} + +func (c deviceLocationColumns) AsSlice() []column { + return []column{ + c.Time, c.DeviceID, c.SpaceSlug, c.Latitude, c.Longitude, c.Accuracy, + } +} + +type deviceLocationIndexes struct { + DeviceLocationsPK index + DeviceLocationsTimeIdx index + IdxDeviceLocationsOrgDeviceTime index +} + +func (i deviceLocationIndexes) AsSlice() []index { + return []index{ + i.DeviceLocationsPK, i.DeviceLocationsTimeIdx, i.IdxDeviceLocationsOrgDeviceTime, + } +} + +type deviceLocationForeignKeys struct{} + +func (f deviceLocationForeignKeys) AsSlice() []foreignKey { + return []foreignKey{} +} + +type deviceLocationUniques struct{} + +func (u deviceLocationUniques) AsSlice() []constraint { + return []constraint{} +} + +type deviceLocationChecks struct{} + +func (c deviceLocationChecks) AsSlice() []check { + return []check{} +} diff --git 
a/pkgs/db/dbinfo/schema_migrations.bob.go b/pkgs/db/dbinfo/schema_migrations.bob.go new file mode 100644 index 0000000..83f3aa1 --- /dev/null +++ b/pkgs/db/dbinfo/schema_migrations.bob.go @@ -0,0 +1,92 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package dbinfo + +import "github.com/aarondl/opt/null" + +var SchemaMigrations = Table[ + schemaMigrationColumns, + schemaMigrationIndexes, + schemaMigrationForeignKeys, + schemaMigrationUniques, + schemaMigrationChecks, +]{ + Schema: "", + Name: "schema_migrations", + Columns: schemaMigrationColumns{ + Version: column{ + Name: "version", + DBType: "character varying", + Default: "", + Comment: "", + Nullable: false, + Generated: false, + AutoIncr: false, + }, + }, + Indexes: schemaMigrationIndexes{ + SchemaMigrationsPkey: index{ + Type: "btree", + Name: "schema_migrations_pkey", + Columns: []indexColumn{ + { + Name: "version", + Desc: null.FromCond(false, true), + IsExpression: false, + }, + }, + Unique: true, + Comment: "", + NullsFirst: []bool{false}, + NullsDistinct: false, + Where: "", + Include: []string{}, + }, + }, + PrimaryKey: &constraint{ + Name: "schema_migrations_pkey", + Columns: []string{"version"}, + Comment: "", + }, + + Comment: "", +} + +type schemaMigrationColumns struct { + Version column +} + +func (c schemaMigrationColumns) AsSlice() []column { + return []column{ + c.Version, + } +} + +type schemaMigrationIndexes struct { + SchemaMigrationsPkey index +} + +func (i schemaMigrationIndexes) AsSlice() []index { + return []index{ + i.SchemaMigrationsPkey, + } +} + +type schemaMigrationForeignKeys struct{} + +func (f schemaMigrationForeignKeys) AsSlice() []foreignKey { + return []foreignKey{} +} + +type schemaMigrationUniques struct{} + +func (u schemaMigrationUniques) AsSlice() []constraint { + return []constraint{} +} + +type schemaMigrationChecks struct{} + +func (c schemaMigrationChecks) AsSlice() []check { 
+ return []check{} +} diff --git a/pkgs/db/factory/bobfactory_context.bob.go b/pkgs/db/factory/bobfactory_context.bob.go new file mode 100644 index 0000000..9563ae5 --- /dev/null +++ b/pkgs/db/factory/bobfactory_context.bob.go @@ -0,0 +1,34 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package factory + +import "context" + +type contextKey string + +var ( + // Relationship Contexts for device_locations + deviceLocationWithParentsCascadingCtx = newContextual[bool]("deviceLocationWithParentsCascading") + + // Relationship Contexts for schema_migrations + schemaMigrationWithParentsCascadingCtx = newContextual[bool]("schemaMigrationWithParentsCascading") +) + +// Contextual is a convienience wrapper around context.WithValue and context.Value +type contextual[V any] struct { + key contextKey +} + +func newContextual[V any](key string) contextual[V] { + return contextual[V]{key: contextKey(key)} +} + +func (k contextual[V]) WithValue(ctx context.Context, val V) context.Context { + return context.WithValue(ctx, k.key, val) +} + +func (k contextual[V]) Value(ctx context.Context) (V, bool) { + v, ok := ctx.Value(k.key).(V) + return v, ok +} diff --git a/pkgs/db/factory/bobfactory_main.bob.go b/pkgs/db/factory/bobfactory_main.bob.go new file mode 100644 index 0000000..a59a3fc --- /dev/null +++ b/pkgs/db/factory/bobfactory_main.bob.go @@ -0,0 +1,89 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package factory + +import ( + "context" + "time" + + models "github.com/Space-DF/telemetry-service/pkgs/db/models" +) + +type Factory struct { + baseDeviceLocationMods DeviceLocationModSlice + baseSchemaMigrationMods SchemaMigrationModSlice +} + +func New() *Factory { + return &Factory{} +} + +func (f *Factory) NewDeviceLocation(mods ...DeviceLocationMod) *DeviceLocationTemplate { + return f.NewDeviceLocationWithContext(context.Background(), mods...) +} + +func (f *Factory) NewDeviceLocationWithContext(ctx context.Context, mods ...DeviceLocationMod) *DeviceLocationTemplate { + o := &DeviceLocationTemplate{f: f} + + if f != nil { + f.baseDeviceLocationMods.Apply(ctx, o) + } + + DeviceLocationModSlice(mods).Apply(ctx, o) + + return o +} + +func (f *Factory) FromExistingDeviceLocation(m *models.DeviceLocation) *DeviceLocationTemplate { + o := &DeviceLocationTemplate{f: f, alreadyPersisted: true} + + o.Time = func() time.Time { return m.Time } + o.DeviceID = func() string { return m.DeviceID } + o.SpaceSlug = func() string { return m.SpaceSlug } + o.Latitude = func() float64 { return m.Latitude } + o.Longitude = func() float64 { return m.Longitude } + o.Accuracy = func() float64 { return m.Accuracy } + + return o +} + +func (f *Factory) NewSchemaMigration(mods ...SchemaMigrationMod) *SchemaMigrationTemplate { + return f.NewSchemaMigrationWithContext(context.Background(), mods...) 
+} + +func (f *Factory) NewSchemaMigrationWithContext(ctx context.Context, mods ...SchemaMigrationMod) *SchemaMigrationTemplate { + o := &SchemaMigrationTemplate{f: f} + + if f != nil { + f.baseSchemaMigrationMods.Apply(ctx, o) + } + + SchemaMigrationModSlice(mods).Apply(ctx, o) + + return o +} + +func (f *Factory) FromExistingSchemaMigration(m *models.SchemaMigration) *SchemaMigrationTemplate { + o := &SchemaMigrationTemplate{f: f, alreadyPersisted: true} + + o.Version = func() string { return m.Version } + + return o +} + +func (f *Factory) ClearBaseDeviceLocationMods() { + f.baseDeviceLocationMods = nil +} + +func (f *Factory) AddBaseDeviceLocationMod(mods ...DeviceLocationMod) { + f.baseDeviceLocationMods = append(f.baseDeviceLocationMods, mods...) +} + +func (f *Factory) ClearBaseSchemaMigrationMods() { + f.baseSchemaMigrationMods = nil +} + +func (f *Factory) AddBaseSchemaMigrationMod(mods ...SchemaMigrationMod) { + f.baseSchemaMigrationMods = append(f.baseSchemaMigrationMods, mods...) +} diff --git a/pkgs/db/factory/bobfactory_main.bob_test.go b/pkgs/db/factory/bobfactory_main.bob_test.go new file mode 100644 index 0000000..142fcb4 --- /dev/null +++ b/pkgs/db/factory/bobfactory_main.bob_test.go @@ -0,0 +1,57 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package factory + +import ( + "context" + "testing" +) + +func TestCreateDeviceLocation(t *testing.T) { + if testDB == nil { + t.Skip("skipping test, no DSN provided") + } + + ctx, cancel := context.WithCancel(t.Context()) + t.Cleanup(cancel) + + tx, err := testDB.Begin(ctx) + if err != nil { + t.Fatalf("Error starting transaction: %v", err) + } + + defer func() { + if err := tx.Rollback(ctx); err != nil { + t.Fatalf("Error rolling back transaction: %v", err) + } + }() + + if _, err := New().NewDeviceLocationWithContext(ctx).Create(ctx, tx); err != nil { + t.Fatalf("Error creating DeviceLocation: %v", err) + } +} + +func TestCreateSchemaMigration(t *testing.T) { + if testDB == nil { + t.Skip("skipping test, no DSN provided") + } + + ctx, cancel := context.WithCancel(t.Context()) + t.Cleanup(cancel) + + tx, err := testDB.Begin(ctx) + if err != nil { + t.Fatalf("Error starting transaction: %v", err) + } + + defer func() { + if err := tx.Rollback(ctx); err != nil { + t.Fatalf("Error rolling back transaction: %v", err) + } + }() + + if _, err := New().NewSchemaMigrationWithContext(ctx).Create(ctx, tx); err != nil { + t.Fatalf("Error creating SchemaMigration: %v", err) + } +} diff --git a/pkgs/db/factory/bobfactory_random.bob.go b/pkgs/db/factory/bobfactory_random.bob.go new file mode 100644 index 0000000..2dadbb7 --- /dev/null +++ b/pkgs/db/factory/bobfactory_random.bob.go @@ -0,0 +1,71 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package factory + +import ( + "math" + "strconv" + "strings" + "time" + + "github.com/jaswdr/faker/v2" +) + +var defaultFaker = faker.New() + +func random_float64(f *faker.Faker, limits ...string) float64 { + if f == nil { + f = &defaultFaker + } + + var precision int64 = 5 + var scale int64 = 2 + + if len(limits) > 0 { + precision, _ = strconv.ParseInt(limits[0], 10, 32) + } + + if len(limits) > 1 { + scale, _ = strconv.ParseInt(limits[1], 10, 32) + } + + baseVal := f.Float64(10, -1, 1) + for baseVal == -1 || baseVal == 0 || baseVal == 1 { + baseVal = f.Float64(10, -1, 1) + } + + scaleFloat := math.Pow10(int(scale)) + + val := baseVal * math.Pow10(int(precision)) + val = math.Trunc(val) / scaleFloat + + return val +} + +func random_string(f *faker.Faker, limits ...string) string { + if f == nil { + f = &defaultFaker + } + + val := strings.Join(f.Lorem().Words(f.IntBetween(1, 5)), " ") + if len(limits) == 0 { + return val + } + limitInt, _ := strconv.Atoi(limits[0]) + if limitInt > 0 && limitInt < len(val) { + val = val[:limitInt] + } + return val +} + +func random_time_Time(f *faker.Faker, limits ...string) time.Time { + if f == nil { + f = &defaultFaker + } + + year := time.Hour * 24 * 365 + min := time.Now().Add(-year) + max := time.Now().Add(year) + return f.Time().TimeBetween(min, max) +} diff --git a/pkgs/db/factory/bobfactory_random.bob_test.go b/pkgs/db/factory/bobfactory_random.bob_test.go new file mode 100644 index 0000000..5ba4083 --- /dev/null +++ b/pkgs/db/factory/bobfactory_random.bob_test.go @@ -0,0 +1,46 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package factory + +import ( + "testing" + + "github.com/stephenafamo/bob" +) + +// Set the testDB to enable tests that use the database +var testDB bob.Transactor[bob.Tx] + +func TestRandom_float64(t *testing.T) { + t.Parallel() + + val1 := random_float64(nil) + val2 := random_float64(nil) + + if val1 == val2 { + t.Fatalf("random_float64() returned the same value twice: %v", val1) + } +} + +func TestRandom_string(t *testing.T) { + t.Parallel() + + val1 := random_string(nil) + val2 := random_string(nil) + + if val1 == val2 { + t.Fatalf("random_string() returned the same value twice: %v", val1) + } +} + +func TestRandom_time_Time(t *testing.T) { + t.Parallel() + + val1 := random_time_Time(nil) + val2 := random_time_Time(nil) + + if val1.Equal(val2) { + t.Fatalf("random_time_Time() returned the same value twice: %v", val1) + } +} diff --git a/pkgs/db/factory/device_locations.bob.go b/pkgs/db/factory/device_locations.bob.go new file mode 100644 index 0000000..d33f8b5 --- /dev/null +++ b/pkgs/db/factory/device_locations.bob.go @@ -0,0 +1,476 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package factory + +import ( + "context" + "testing" + "time" + + models "github.com/Space-DF/telemetry-service/pkgs/db/models" + "github.com/aarondl/opt/omit" + "github.com/jaswdr/faker/v2" + "github.com/stephenafamo/bob" +) + +type DeviceLocationMod interface { + Apply(context.Context, *DeviceLocationTemplate) +} + +type DeviceLocationModFunc func(context.Context, *DeviceLocationTemplate) + +func (f DeviceLocationModFunc) Apply(ctx context.Context, n *DeviceLocationTemplate) { + f(ctx, n) +} + +type DeviceLocationModSlice []DeviceLocationMod + +func (mods DeviceLocationModSlice) Apply(ctx context.Context, n *DeviceLocationTemplate) { + for _, f := range mods { + f.Apply(ctx, n) + } +} + +// DeviceLocationTemplate is an object representing the database table. 
+// all columns are optional and should be set by mods +type DeviceLocationTemplate struct { + Time func() time.Time + DeviceID func() string + SpaceSlug func() string + Latitude func() float64 + Longitude func() float64 + Accuracy func() float64 + + f *Factory + + alreadyPersisted bool +} + +// Apply mods to the DeviceLocationTemplate +func (o *DeviceLocationTemplate) Apply(ctx context.Context, mods ...DeviceLocationMod) { + for _, mod := range mods { + mod.Apply(ctx, o) + } +} + +// setModelRels creates and sets the relationships on *models.DeviceLocation +// according to the relationships in the template. Nothing is inserted into the db +func (t DeviceLocationTemplate) setModelRels(o *models.DeviceLocation) {} + +// BuildSetter returns an *models.DeviceLocationSetter +// this does nothing with the relationship templates +func (o DeviceLocationTemplate) BuildSetter() *models.DeviceLocationSetter { + m := &models.DeviceLocationSetter{} + + if o.Time != nil { + val := o.Time() + m.Time = omit.From(val) + } + if o.DeviceID != nil { + val := o.DeviceID() + m.DeviceID = omit.From(val) + } + if o.SpaceSlug != nil { + val := o.SpaceSlug() + m.SpaceSlug = omit.From(val) + } + if o.Latitude != nil { + val := o.Latitude() + m.Latitude = omit.From(val) + } + if o.Longitude != nil { + val := o.Longitude() + m.Longitude = omit.From(val) + } + if o.Accuracy != nil { + val := o.Accuracy() + m.Accuracy = omit.From(val) + } + + return m +} + +// BuildManySetter returns an []*models.DeviceLocationSetter +// this does nothing with the relationship templates +func (o DeviceLocationTemplate) BuildManySetter(number int) []*models.DeviceLocationSetter { + m := make([]*models.DeviceLocationSetter, number) + + for i := range m { + m[i] = o.BuildSetter() + } + + return m +} + +// Build returns an *models.DeviceLocation +// Related objects are also created and placed in the .R field +// NOTE: Objects are not inserted into the database. 
Use DeviceLocationTemplate.Create +func (o DeviceLocationTemplate) Build() *models.DeviceLocation { + m := &models.DeviceLocation{} + + if o.Time != nil { + m.Time = o.Time() + } + if o.DeviceID != nil { + m.DeviceID = o.DeviceID() + } + if o.SpaceSlug != nil { + m.SpaceSlug = o.SpaceSlug() + } + if o.Latitude != nil { + m.Latitude = o.Latitude() + } + if o.Longitude != nil { + m.Longitude = o.Longitude() + } + if o.Accuracy != nil { + m.Accuracy = o.Accuracy() + } + + o.setModelRels(m) + + return m +} + +// BuildMany returns an models.DeviceLocationSlice +// Related objects are also created and placed in the .R field +// NOTE: Objects are not inserted into the database. Use DeviceLocationTemplate.CreateMany +func (o DeviceLocationTemplate) BuildMany(number int) models.DeviceLocationSlice { + m := make(models.DeviceLocationSlice, number) + + for i := range m { + m[i] = o.Build() + } + + return m +} + +func ensureCreatableDeviceLocation(m *models.DeviceLocationSetter) { + if !(m.Time.IsValue()) { + val := random_time_Time(nil) + m.Time = omit.From(val) + } + if !(m.DeviceID.IsValue()) { + val := random_string(nil, "255") + m.DeviceID = omit.From(val) + } + if !(m.SpaceSlug.IsValue()) { + val := random_string(nil, "255") + m.SpaceSlug = omit.From(val) + } + if !(m.Latitude.IsValue()) { + val := random_float64(nil) + m.Latitude = omit.From(val) + } + if !(m.Longitude.IsValue()) { + val := random_float64(nil) + m.Longitude = omit.From(val) + } + if !(m.Accuracy.IsValue()) { + val := random_float64(nil) + m.Accuracy = omit.From(val) + } +} + +// insertOptRels creates and inserts any optional the relationships on *models.DeviceLocation +// according to the relationships in the template. 
+// any required relationship should have already exist on the model +func (o *DeviceLocationTemplate) insertOptRels(ctx context.Context, exec bob.Executor, m *models.DeviceLocation) error { + var err error + + return err +} + +// Create builds a deviceLocation and inserts it into the database +// Relations objects are also inserted and placed in the .R field +func (o *DeviceLocationTemplate) Create(ctx context.Context, exec bob.Executor) (*models.DeviceLocation, error) { + var err error + opt := o.BuildSetter() + ensureCreatableDeviceLocation(opt) + + m, err := models.DeviceLocations.Insert(opt).One(ctx, exec) + if err != nil { + return nil, err + } + + if err := o.insertOptRels(ctx, exec, m); err != nil { + return nil, err + } + return m, err +} + +// MustCreate builds a deviceLocation and inserts it into the database +// Relations objects are also inserted and placed in the .R field +// panics if an error occurs +func (o *DeviceLocationTemplate) MustCreate(ctx context.Context, exec bob.Executor) *models.DeviceLocation { + m, err := o.Create(ctx, exec) + if err != nil { + panic(err) + } + return m +} + +// CreateOrFail builds a deviceLocation and inserts it into the database +// Relations objects are also inserted and placed in the .R field +// It calls `tb.Fatal(err)` on the test/benchmark if an error occurs +func (o *DeviceLocationTemplate) CreateOrFail(ctx context.Context, tb testing.TB, exec bob.Executor) *models.DeviceLocation { + tb.Helper() + m, err := o.Create(ctx, exec) + if err != nil { + tb.Fatal(err) + return nil + } + return m +} + +// CreateMany builds multiple deviceLocations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +func (o DeviceLocationTemplate) CreateMany(ctx context.Context, exec bob.Executor, number int) (models.DeviceLocationSlice, error) { + var err error + m := make(models.DeviceLocationSlice, number) + + for i := range m { + m[i], err = o.Create(ctx, exec) + if err != nil { + 
return nil, err + } + } + + return m, nil +} + +// MustCreateMany builds multiple deviceLocations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +// panics if an error occurs +func (o DeviceLocationTemplate) MustCreateMany(ctx context.Context, exec bob.Executor, number int) models.DeviceLocationSlice { + m, err := o.CreateMany(ctx, exec, number) + if err != nil { + panic(err) + } + return m +} + +// CreateManyOrFail builds multiple deviceLocations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +// It calls `tb.Fatal(err)` on the test/benchmark if an error occurs +func (o DeviceLocationTemplate) CreateManyOrFail(ctx context.Context, tb testing.TB, exec bob.Executor, number int) models.DeviceLocationSlice { + tb.Helper() + m, err := o.CreateMany(ctx, exec, number) + if err != nil { + tb.Fatal(err) + return nil + } + return m +} + +// DeviceLocation has methods that act as mods for the DeviceLocationTemplate +var DeviceLocationMods deviceLocationMods + +type deviceLocationMods struct{} + +func (m deviceLocationMods) RandomizeAllColumns(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModSlice{ + DeviceLocationMods.RandomTime(f), + DeviceLocationMods.RandomDeviceID(f), + DeviceLocationMods.RandomSpaceSlug(f), + DeviceLocationMods.RandomLatitude(f), + DeviceLocationMods.RandomLongitude(f), + DeviceLocationMods.RandomAccuracy(f), + } +} + +// Set the model columns to this value +func (m deviceLocationMods) Time(val time.Time) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Time = func() time.Time { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) TimeFunc(f func() time.Time) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Time = f + }) +} + +// Clear any values for the column +func (m 
deviceLocationMods) UnsetTime() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Time = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomTime(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Time = func() time.Time { + return random_time_Time(f) + } + }) +} + +// Set the model columns to this value +func (m deviceLocationMods) DeviceID(val string) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.DeviceID = func() string { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) DeviceIDFunc(f func() string) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.DeviceID = f + }) +} + +// Clear any values for the column +func (m deviceLocationMods) UnsetDeviceID() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.DeviceID = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomDeviceID(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.DeviceID = func() string { + return random_string(f, "255") + } + }) +} + +// Set the model columns to this value +func (m deviceLocationMods) SpaceSlug(val string) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.SpaceSlug = func() string { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) SpaceSlugFunc(f func() string) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + 
o.SpaceSlug = f + }) +} + +// Clear any values for the column +func (m deviceLocationMods) UnsetSpaceSlug() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.SpaceSlug = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomSpaceSlug(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.SpaceSlug = func() string { + return random_string(f, "255") + } + }) +} + +// Set the model columns to this value +func (m deviceLocationMods) Latitude(val float64) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Latitude = func() float64 { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) LatitudeFunc(f func() float64) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Latitude = f + }) +} + +// Clear any values for the column +func (m deviceLocationMods) UnsetLatitude() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Latitude = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomLatitude(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Latitude = func() float64 { + return random_float64(f) + } + }) +} + +// Set the model columns to this value +func (m deviceLocationMods) Longitude(val float64) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Longitude = func() float64 { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) LongitudeFunc(f func() float64) 
DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Longitude = f + }) +} + +// Clear any values for the column +func (m deviceLocationMods) UnsetLongitude() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Longitude = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomLongitude(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Longitude = func() float64 { + return random_float64(f) + } + }) +} + +// Set the model columns to this value +func (m deviceLocationMods) Accuracy(val float64) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Accuracy = func() float64 { return val } + }) +} + +// Set the Column from the function +func (m deviceLocationMods) AccuracyFunc(f func() float64) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Accuracy = f + }) +} + +// Clear any values for the column +func (m deviceLocationMods) UnsetAccuracy() DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Accuracy = nil + }) +} + +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m deviceLocationMods) RandomAccuracy(f *faker.Faker) DeviceLocationMod { + return DeviceLocationModFunc(func(_ context.Context, o *DeviceLocationTemplate) { + o.Accuracy = func() float64 { + return random_float64(f) + } + }) +} + +func (m deviceLocationMods) WithParentsCascading() DeviceLocationMod { + return DeviceLocationModFunc(func(ctx context.Context, o *DeviceLocationTemplate) { + if isDone, _ := deviceLocationWithParentsCascadingCtx.Value(ctx); isDone { + return + } + ctx = 
deviceLocationWithParentsCascadingCtx.WithValue(ctx, true) + }) +} diff --git a/pkgs/db/factory/schema_migrations.bob.go b/pkgs/db/factory/schema_migrations.bob.go new file mode 100644 index 0000000..bf158be --- /dev/null +++ b/pkgs/db/factory/schema_migrations.bob.go @@ -0,0 +1,255 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package factory + +import ( + "context" + "testing" + + models "github.com/Space-DF/telemetry-service/pkgs/db/models" + "github.com/aarondl/opt/omit" + "github.com/jaswdr/faker/v2" + "github.com/stephenafamo/bob" +) + +type SchemaMigrationMod interface { + Apply(context.Context, *SchemaMigrationTemplate) +} + +type SchemaMigrationModFunc func(context.Context, *SchemaMigrationTemplate) + +func (f SchemaMigrationModFunc) Apply(ctx context.Context, n *SchemaMigrationTemplate) { + f(ctx, n) +} + +type SchemaMigrationModSlice []SchemaMigrationMod + +func (mods SchemaMigrationModSlice) Apply(ctx context.Context, n *SchemaMigrationTemplate) { + for _, f := range mods { + f.Apply(ctx, n) + } +} + +// SchemaMigrationTemplate is an object representing the database table. +// all columns are optional and should be set by mods +type SchemaMigrationTemplate struct { + Version func() string + + f *Factory + + alreadyPersisted bool +} + +// Apply mods to the SchemaMigrationTemplate +func (o *SchemaMigrationTemplate) Apply(ctx context.Context, mods ...SchemaMigrationMod) { + for _, mod := range mods { + mod.Apply(ctx, o) + } +} + +// setModelRels creates and sets the relationships on *models.SchemaMigration +// according to the relationships in the template. 
Nothing is inserted into the db +func (t SchemaMigrationTemplate) setModelRels(o *models.SchemaMigration) {} + +// BuildSetter returns an *models.SchemaMigrationSetter +// this does nothing with the relationship templates +func (o SchemaMigrationTemplate) BuildSetter() *models.SchemaMigrationSetter { + m := &models.SchemaMigrationSetter{} + + if o.Version != nil { + val := o.Version() + m.Version = omit.From(val) + } + + return m +} + +// BuildManySetter returns an []*models.SchemaMigrationSetter +// this does nothing with the relationship templates +func (o SchemaMigrationTemplate) BuildManySetter(number int) []*models.SchemaMigrationSetter { + m := make([]*models.SchemaMigrationSetter, number) + + for i := range m { + m[i] = o.BuildSetter() + } + + return m +} + +// Build returns an *models.SchemaMigration +// Related objects are also created and placed in the .R field +// NOTE: Objects are not inserted into the database. Use SchemaMigrationTemplate.Create +func (o SchemaMigrationTemplate) Build() *models.SchemaMigration { + m := &models.SchemaMigration{} + + if o.Version != nil { + m.Version = o.Version() + } + + o.setModelRels(m) + + return m +} + +// BuildMany returns an models.SchemaMigrationSlice +// Related objects are also created and placed in the .R field +// NOTE: Objects are not inserted into the database. Use SchemaMigrationTemplate.CreateMany +func (o SchemaMigrationTemplate) BuildMany(number int) models.SchemaMigrationSlice { + m := make(models.SchemaMigrationSlice, number) + + for i := range m { + m[i] = o.Build() + } + + return m +} + +func ensureCreatableSchemaMigration(m *models.SchemaMigrationSetter) { + if !(m.Version.IsValue()) { + val := random_string(nil) + m.Version = omit.From(val) + } +} + +// insertOptRels creates and inserts any optional the relationships on *models.SchemaMigration +// according to the relationships in the template. 
+// any required relationship should have already exist on the model +func (o *SchemaMigrationTemplate) insertOptRels(ctx context.Context, exec bob.Executor, m *models.SchemaMigration) error { + var err error + + return err +} + +// Create builds a schemaMigration and inserts it into the database +// Relations objects are also inserted and placed in the .R field +func (o *SchemaMigrationTemplate) Create(ctx context.Context, exec bob.Executor) (*models.SchemaMigration, error) { + var err error + opt := o.BuildSetter() + ensureCreatableSchemaMigration(opt) + + m, err := models.SchemaMigrations.Insert(opt).One(ctx, exec) + if err != nil { + return nil, err + } + + if err := o.insertOptRels(ctx, exec, m); err != nil { + return nil, err + } + return m, err +} + +// MustCreate builds a schemaMigration and inserts it into the database +// Relations objects are also inserted and placed in the .R field +// panics if an error occurs +func (o *SchemaMigrationTemplate) MustCreate(ctx context.Context, exec bob.Executor) *models.SchemaMigration { + m, err := o.Create(ctx, exec) + if err != nil { + panic(err) + } + return m +} + +// CreateOrFail builds a schemaMigration and inserts it into the database +// Relations objects are also inserted and placed in the .R field +// It calls `tb.Fatal(err)` on the test/benchmark if an error occurs +func (o *SchemaMigrationTemplate) CreateOrFail(ctx context.Context, tb testing.TB, exec bob.Executor) *models.SchemaMigration { + tb.Helper() + m, err := o.Create(ctx, exec) + if err != nil { + tb.Fatal(err) + return nil + } + return m +} + +// CreateMany builds multiple schemaMigrations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +func (o SchemaMigrationTemplate) CreateMany(ctx context.Context, exec bob.Executor, number int) (models.SchemaMigrationSlice, error) { + var err error + m := make(models.SchemaMigrationSlice, number) + + for i := range m { + m[i], err = o.Create(ctx, exec) + if 
err != nil { + return nil, err + } + } + + return m, nil +} + +// MustCreateMany builds multiple schemaMigrations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +// panics if an error occurs +func (o SchemaMigrationTemplate) MustCreateMany(ctx context.Context, exec bob.Executor, number int) models.SchemaMigrationSlice { + m, err := o.CreateMany(ctx, exec, number) + if err != nil { + panic(err) + } + return m +} + +// CreateManyOrFail builds multiple schemaMigrations and inserts them into the database +// Relations objects are also inserted and placed in the .R field +// It calls `tb.Fatal(err)` on the test/benchmark if an error occurs +func (o SchemaMigrationTemplate) CreateManyOrFail(ctx context.Context, tb testing.TB, exec bob.Executor, number int) models.SchemaMigrationSlice { + tb.Helper() + m, err := o.CreateMany(ctx, exec, number) + if err != nil { + tb.Fatal(err) + return nil + } + return m +} + +// SchemaMigration has methods that act as mods for the SchemaMigrationTemplate +var SchemaMigrationMods schemaMigrationMods + +type schemaMigrationMods struct{} + +func (m schemaMigrationMods) RandomizeAllColumns(f *faker.Faker) SchemaMigrationMod { + return SchemaMigrationModSlice{ + SchemaMigrationMods.RandomVersion(f), + } +} + +// Set the model columns to this value +func (m schemaMigrationMods) Version(val string) SchemaMigrationMod { + return SchemaMigrationModFunc(func(_ context.Context, o *SchemaMigrationTemplate) { + o.Version = func() string { return val } + }) +} + +// Set the Column from the function +func (m schemaMigrationMods) VersionFunc(f func() string) SchemaMigrationMod { + return SchemaMigrationModFunc(func(_ context.Context, o *SchemaMigrationTemplate) { + o.Version = f + }) +} + +// Clear any values for the column +func (m schemaMigrationMods) UnsetVersion() SchemaMigrationMod { + return SchemaMigrationModFunc(func(_ context.Context, o *SchemaMigrationTemplate) { + o.Version = nil + }) +} 
+ +// Generates a random value for the column using the given faker +// if faker is nil, a default faker is used +func (m schemaMigrationMods) RandomVersion(f *faker.Faker) SchemaMigrationMod { + return SchemaMigrationModFunc(func(_ context.Context, o *SchemaMigrationTemplate) { + o.Version = func() string { + return random_string(f) + } + }) +} + +func (m schemaMigrationMods) WithParentsCascading() SchemaMigrationMod { + return SchemaMigrationModFunc(func(ctx context.Context, o *SchemaMigrationTemplate) { + if isDone, _ := schemaMigrationWithParentsCascadingCtx.Value(ctx); isDone { + return + } + ctx = schemaMigrationWithParentsCascadingCtx.WithValue(ctx, true) + }) +} diff --git a/pkgs/db/migrate.go b/pkgs/db/migrate.go new file mode 100644 index 0000000..823593a --- /dev/null +++ b/pkgs/db/migrate.go @@ -0,0 +1,28 @@ +package db + +import ( + "net/url" + "time" + + "github.com/amacneil/dbmate/v2/pkg/dbmate" + _ "github.com/amacneil/dbmate/v2/pkg/driver/postgres" +) + +//go:generate bobgen-psql -c ../../configs/config.yaml + +func Migrate(conn *url.URL, migrationPath string) error { + dbMate := dbmate.New(conn) + + dbMate.Verbose = true + dbMate.WaitBefore = true + dbMate.AutoDumpSchema = false + + // so that checkWaitCalled returns quickly + dbMate.WaitInterval = time.Millisecond + dbMate.WaitTimeout = 5 * time.Millisecond + + // setting the path where the migration scripts are + dbMate.MigrationsDir = []string{migrationPath} + + return dbMate.Migrate() +} diff --git a/pkgs/db/migrations/20251114041904_create_device_locations.sql b/pkgs/db/migrations/20251114041904_create_device_locations.sql new file mode 100644 index 0000000..d9bed3e --- /dev/null +++ b/pkgs/db/migrations/20251114041904_create_device_locations.sql @@ -0,0 +1,33 @@ +-- migrate:up +CREATE TABLE IF NOT EXISTS device_locations ( + time TIMESTAMPTZ NOT NULL, + device_id VARCHAR(255) NOT NULL, + organization_slug VARCHAR(255) NOT NULL, + latitude DOUBLE PRECISION NOT NULL, + longitude DOUBLE 
PRECISION NOT NULL, + accuracy DOUBLE PRECISION NOT NULL, + CONSTRAINT device_locations_pk PRIMARY KEY (time, device_id) +); + +CREATE INDEX idx_device_locations_org_device_time + ON device_locations (organization_slug, device_id, + time DESC); + + +-- Create a hypertable to enable TimescaleDB features +SELECT create_hypertable('device_locations', 'time', + chunk_time_interval => INTERVAL '1 day', + if_not_exists => TRUE +); + +ALTER TABLE device_locations + SET ( + timescaledb.compress = true, + timescaledb.compress_segmentby = 'organization_slug, device_id' + ); + +SELECT add_compression_policy('device_locations', INTERVAL '7 days'); + + +-- migrate:down +DROP TABLE IF EXISTS device_locations CASCADE ; diff --git a/pkgs/db/migrations/20251126082459_add-space-slug-column.sql b/pkgs/db/migrations/20251126082459_add-space-slug-column.sql new file mode 100644 index 0000000..e824c58 --- /dev/null +++ b/pkgs/db/migrations/20251126082459_add-space-slug-column.sql @@ -0,0 +1,5 @@ +-- migrate:up +ALTER TABLE device_locations RENAME COLUMN organization_slug TO space_slug; + +-- migrate:down +ALTER TABLE device_locations RENAME COLUMN space_slug TO organization_slug; diff --git a/pkgs/db/migrations/20251208120000_create_entities_schema.sql b/pkgs/db/migrations/20251208120000_create_entities_schema.sql new file mode 100644 index 0000000..069316d --- /dev/null +++ b/pkgs/db/migrations/20251208120000_create_entities_schema.sql @@ -0,0 +1,64 @@ +-- migrate:up + +-- Create entities schema tables alongside existing device_locations table +-- Both are needed: device_locations for location history, entities for device state/attributes + +CREATE TABLE IF NOT EXISTS entity_types ( + id UUID PRIMARY KEY, + name TEXT NOT NULL, + unique_key TEXT NOT NULL, + image_url TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_entity_types_unique_key ON entity_types (unique_key); + +CREATE TABLE IF 
NOT EXISTS entities ( + id UUID PRIMARY KEY, + space_slug TEXT, + device_id UUID NOT NULL, + unique_key TEXT NOT NULL, + category TEXT, + entity_type_id UUID NOT NULL REFERENCES entity_types(id) ON DELETE CASCADE, + name TEXT, + unit_of_measurement TEXT, + display_type TEXT[] NOT NULL DEFAULT ARRAY['unknown'], + image_url TEXT, + is_enabled BOOLEAN NOT NULL DEFAULT true, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_entities_unique_key ON entities (unique_key); +CREATE INDEX IF NOT EXISTS idx_entities_space_slug ON entities (space_slug); +CREATE INDEX IF NOT EXISTS idx_entities_device_id ON entities (device_id); + +CREATE TABLE IF NOT EXISTS entity_state_attributes ( + id UUID PRIMARY KEY, + hash BIGINT NOT NULL, + shared_attrs JSONB NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_entity_state_attributes_hash ON entity_state_attributes (hash); + +CREATE TABLE IF NOT EXISTS entity_states ( + id UUID PRIMARY KEY, + entity_id UUID NOT NULL REFERENCES entities(id) ON DELETE CASCADE, + state TEXT NOT NULL, + attributes_id UUID REFERENCES entity_state_attributes(id), + old_state_id UUID REFERENCES entity_states(id), + reported_at TIMESTAMPTZ NOT NULL, + last_changed_at TIMESTAMPTZ NOT NULL +); + +CREATE INDEX IF NOT EXISTS idx_entity_states_entity_reported_at ON entity_states (entity_id, reported_at DESC); + + +-- migrate:down + +-- Remove entities schema tables, leaving device_locations intact +DROP TABLE IF EXISTS entity_states CASCADE; +DROP TABLE IF EXISTS entity_state_attributes CASCADE; +DROP TABLE IF EXISTS entities CASCADE; +DROP TABLE IF EXISTS entity_types CASCADE; diff --git a/pkgs/db/migrations/20251209120000_drop_device_locations.sql b/pkgs/db/migrations/20251209120000_drop_device_locations.sql new file mode 100644 index 0000000..2f43a8b --- /dev/null +++ b/pkgs/db/migrations/20251209120000_drop_device_locations.sql @@ -0,0 +1,15 @@ +-- migrate:up + +DROP 
TABLE IF EXISTS device_locations CASCADE; + +-- migrate:down + +CREATE TABLE IF NOT EXISTS device_locations ( + time TIMESTAMPTZ NOT NULL, + device_id UUID NOT NULL, + space_slug TEXT, + latitude DOUBLE PRECISION, + longitude DOUBLE PRECISION, + accuracy DOUBLE PRECISION, + PRIMARY KEY (time, device_id) +); diff --git a/pkgs/db/models/bob_joins.bob.go b/pkgs/db/models/bob_joins.bob.go new file mode 100644 index 0000000..57e8b1b --- /dev/null +++ b/pkgs/db/models/bob_joins.bob.go @@ -0,0 +1,70 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "hash/maphash" + + "github.com/stephenafamo/bob" + "github.com/stephenafamo/bob/clause" + "github.com/stephenafamo/bob/dialect/psql/dialect" +) + +var ( + SelectJoins = getJoins[*dialect.SelectQuery]() + UpdateJoins = getJoins[*dialect.UpdateQuery]() + DeleteJoins = getJoins[*dialect.DeleteQuery]() +) + +type joinSet[Q interface{ aliasedAs(string) Q }] struct { + InnerJoin Q + LeftJoin Q + RightJoin Q +} + +func (j joinSet[Q]) AliasedAs(alias string) joinSet[Q] { + return joinSet[Q]{ + InnerJoin: j.InnerJoin.aliasedAs(alias), + LeftJoin: j.LeftJoin.aliasedAs(alias), + RightJoin: j.RightJoin.aliasedAs(alias), + } +} + +type joins[Q dialect.Joinable] struct{} + +func buildJoinSet[Q interface{ aliasedAs(string) Q }, C any, F func(C, string) Q](c C, f F) joinSet[Q] { + return joinSet[Q]{ + InnerJoin: f(c, clause.InnerJoin), + LeftJoin: f(c, clause.LeftJoin), + RightJoin: f(c, clause.RightJoin), + } +} + +func getJoins[Q dialect.Joinable]() joins[Q] { + return joins[Q]{} +} + +type modAs[Q any, C interface{ AliasedAs(string) C }] struct { + c C + f func(C) bob.Mod[Q] +} + +func (m modAs[Q, C]) Apply(q Q) { + m.f(m.c).Apply(q) +} + +func (m modAs[Q, C]) AliasedAs(alias string) bob.Mod[Q] { + m.c = m.c.AliasedAs(alias) + return m +} + +func randInt() int64 { + out := int64(new(maphash.Hash).Sum64()) // #nosec G115 + 
+ if out < 0 { + return -out % 10000 + } + + return out % 10000 +} diff --git a/pkgs/db/models/bob_loaders.bob.go b/pkgs/db/models/bob_loaders.bob.go new file mode 100644 index 0000000..eff99e6 --- /dev/null +++ b/pkgs/db/models/bob_loaders.bob.go @@ -0,0 +1,55 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/stephenafamo/bob" + "github.com/stephenafamo/bob/dialect/psql/dialect" + "github.com/stephenafamo/bob/orm" +) + +var Preload = getPreloaders() + +type preloaders struct{} + +func getPreloaders() preloaders { + return preloaders{} +} + +var ( + SelectThenLoad = getThenLoaders[*dialect.SelectQuery]() + InsertThenLoad = getThenLoaders[*dialect.InsertQuery]() + UpdateThenLoad = getThenLoaders[*dialect.UpdateQuery]() +) + +type thenLoaders[Q orm.Loadable] struct{} + +func getThenLoaders[Q orm.Loadable]() thenLoaders[Q] { + return thenLoaders[Q]{} +} + +func thenLoadBuilder[Q orm.Loadable, T any](name string, f func(context.Context, bob.Executor, T, ...bob.Mod[*dialect.SelectQuery]) error) func(...bob.Mod[*dialect.SelectQuery]) orm.Loader[Q] { + return func(queryMods ...bob.Mod[*dialect.SelectQuery]) orm.Loader[Q] { + return func(ctx context.Context, exec bob.Executor, retrieved any) error { + loader, isLoader := retrieved.(T) + if !isLoader { + return fmt.Errorf("object %T cannot load %q", retrieved, name) + } + + err := f(ctx, exec, loader, queryMods...) + + // Don't cause an issue due to missing relationships + if errors.Is(err, sql.ErrNoRows) { + return nil + } + + return err + } + } +} diff --git a/pkgs/db/models/bob_types.bob_test.go b/pkgs/db/models/bob_types.bob_test.go new file mode 100644 index 0000000..ef5fc08 --- /dev/null +++ b/pkgs/db/models/bob_types.bob_test.go @@ -0,0 +1,15 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. 
+// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import "github.com/stephenafamo/bob" + +// Set the testDB to enable tests that use the database +var testDB bob.Transactor[bob.Tx] + +// Make sure the type DeviceLocation runs hooks after queries +var _ bob.HookableType = &DeviceLocation{} + +// Make sure the type SchemaMigration runs hooks after queries +var _ bob.HookableType = &SchemaMigration{} diff --git a/pkgs/db/models/bob_where.bob.go b/pkgs/db/models/bob_where.bob.go new file mode 100644 index 0000000..5f88f6a --- /dev/null +++ b/pkgs/db/models/bob_where.bob.go @@ -0,0 +1,30 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "github.com/stephenafamo/bob/clause" + "github.com/stephenafamo/bob/dialect/psql" + "github.com/stephenafamo/bob/dialect/psql/dialect" +) + +var ( + SelectWhere = Where[*dialect.SelectQuery]() + UpdateWhere = Where[*dialect.UpdateQuery]() + DeleteWhere = Where[*dialect.DeleteQuery]() + OnConflictWhere = Where[*clause.ConflictClause]() // Used in ON CONFLICT DO UPDATE +) + +func Where[Q psql.Filterable]() struct { + DeviceLocations deviceLocationWhere[Q] + SchemaMigrations schemaMigrationWhere[Q] +} { + return struct { + DeviceLocations deviceLocationWhere[Q] + SchemaMigrations schemaMigrationWhere[Q] + }{ + DeviceLocations: buildDeviceLocationWhere[Q](DeviceLocations.Columns), + SchemaMigrations: buildSchemaMigrationWhere[Q](SchemaMigrations.Columns), + } +} diff --git a/pkgs/db/models/device_locations.bob.go b/pkgs/db/models/device_locations.bob.go new file mode 100644 index 0000000..5de53e6 --- /dev/null +++ b/pkgs/db/models/device_locations.bob.go @@ -0,0 +1,484 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package models + +import ( + "context" + "io" + "time" + + "github.com/aarondl/opt/omit" + "github.com/stephenafamo/bob" + "github.com/stephenafamo/bob/dialect/psql" + "github.com/stephenafamo/bob/dialect/psql/dialect" + "github.com/stephenafamo/bob/dialect/psql/dm" + "github.com/stephenafamo/bob/dialect/psql/sm" + "github.com/stephenafamo/bob/dialect/psql/um" + "github.com/stephenafamo/bob/expr" +) + +// DeviceLocation is an object representing the database table. +type DeviceLocation struct { + Time time.Time `db:"time,pk" ` + DeviceID string `db:"device_id,pk" ` + SpaceSlug string `db:"space_slug" ` + Latitude float64 `db:"latitude" ` + Longitude float64 `db:"longitude" ` + Accuracy float64 `db:"accuracy" ` +} + +// DeviceLocationSlice is an alias for a slice of pointers to DeviceLocation. +// This should almost always be used instead of []*DeviceLocation. +type DeviceLocationSlice []*DeviceLocation + +// DeviceLocations contains methods to work with the device_locations table +var DeviceLocations = psql.NewTablex[*DeviceLocation, DeviceLocationSlice, *DeviceLocationSetter]("", "device_locations", buildDeviceLocationColumns("device_locations")) + +// DeviceLocationsQuery is a query on the device_locations table +type DeviceLocationsQuery = *psql.ViewQuery[*DeviceLocation, DeviceLocationSlice] + +func buildDeviceLocationColumns(alias string) deviceLocationColumns { + return deviceLocationColumns{ + ColumnsExpr: expr.NewColumnsExpr( + "time", "device_id", "space_slug", "latitude", "longitude", "accuracy", + ).WithParent("device_locations"), + tableAlias: alias, + Time: psql.Quote(alias, "time"), + DeviceID: psql.Quote(alias, "device_id"), + SpaceSlug: psql.Quote(alias, "space_slug"), + Latitude: psql.Quote(alias, "latitude"), + Longitude: psql.Quote(alias, "longitude"), + Accuracy: psql.Quote(alias, "accuracy"), + } +} + +type deviceLocationColumns struct { + expr.ColumnsExpr + tableAlias string + Time psql.Expression + DeviceID psql.Expression + SpaceSlug 
psql.Expression + Latitude psql.Expression + Longitude psql.Expression + Accuracy psql.Expression +} + +func (c deviceLocationColumns) Alias() string { + return c.tableAlias +} + +func (deviceLocationColumns) AliasedAs(alias string) deviceLocationColumns { + return buildDeviceLocationColumns(alias) +} + +// DeviceLocationSetter is used for insert/upsert/update operations +// All values are optional, and do not have to be set +// Generated columns are not included +type DeviceLocationSetter struct { + Time omit.Val[time.Time] `db:"time,pk" ` + DeviceID omit.Val[string] `db:"device_id,pk" ` + SpaceSlug omit.Val[string] `db:"space_slug" ` + Latitude omit.Val[float64] `db:"latitude" ` + Longitude omit.Val[float64] `db:"longitude" ` + Accuracy omit.Val[float64] `db:"accuracy" ` +} + +func (s DeviceLocationSetter) SetColumns() []string { + vals := make([]string, 0, 6) + if s.Time.IsValue() { + vals = append(vals, "time") + } + if s.DeviceID.IsValue() { + vals = append(vals, "device_id") + } + if s.SpaceSlug.IsValue() { + vals = append(vals, "space_slug") + } + if s.Latitude.IsValue() { + vals = append(vals, "latitude") + } + if s.Longitude.IsValue() { + vals = append(vals, "longitude") + } + if s.Accuracy.IsValue() { + vals = append(vals, "accuracy") + } + return vals +} + +func (s DeviceLocationSetter) Overwrite(t *DeviceLocation) { + if s.Time.IsValue() { + t.Time = s.Time.MustGet() + } + if s.DeviceID.IsValue() { + t.DeviceID = s.DeviceID.MustGet() + } + if s.SpaceSlug.IsValue() { + t.SpaceSlug = s.SpaceSlug.MustGet() + } + if s.Latitude.IsValue() { + t.Latitude = s.Latitude.MustGet() + } + if s.Longitude.IsValue() { + t.Longitude = s.Longitude.MustGet() + } + if s.Accuracy.IsValue() { + t.Accuracy = s.Accuracy.MustGet() + } +} + +func (s *DeviceLocationSetter) Apply(q *dialect.InsertQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return DeviceLocations.BeforeInsertHooks.RunHooks(ctx, exec, s) + }) + + 
q.AppendValues(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + vals := make([]bob.Expression, 6) + if s.Time.IsValue() { + vals[0] = psql.Arg(s.Time.MustGet()) + } else { + vals[0] = psql.Raw("DEFAULT") + } + + if s.DeviceID.IsValue() { + vals[1] = psql.Arg(s.DeviceID.MustGet()) + } else { + vals[1] = psql.Raw("DEFAULT") + } + + if s.SpaceSlug.IsValue() { + vals[2] = psql.Arg(s.SpaceSlug.MustGet()) + } else { + vals[2] = psql.Raw("DEFAULT") + } + + if s.Latitude.IsValue() { + vals[3] = psql.Arg(s.Latitude.MustGet()) + } else { + vals[3] = psql.Raw("DEFAULT") + } + + if s.Longitude.IsValue() { + vals[4] = psql.Arg(s.Longitude.MustGet()) + } else { + vals[4] = psql.Raw("DEFAULT") + } + + if s.Accuracy.IsValue() { + vals[5] = psql.Arg(s.Accuracy.MustGet()) + } else { + vals[5] = psql.Raw("DEFAULT") + } + + return bob.ExpressSlice(ctx, w, d, start, vals, "", ", ", "") + })) +} + +func (s DeviceLocationSetter) UpdateMod() bob.Mod[*dialect.UpdateQuery] { + return um.Set(s.Expressions()...) 
+} + +func (s DeviceLocationSetter) Expressions(prefix ...string) []bob.Expression { + exprs := make([]bob.Expression, 0, 6) + + if s.Time.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "time")...), + psql.Arg(s.Time), + }}) + } + + if s.DeviceID.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "device_id")...), + psql.Arg(s.DeviceID), + }}) + } + + if s.SpaceSlug.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "space_slug")...), + psql.Arg(s.SpaceSlug), + }}) + } + + if s.Latitude.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "latitude")...), + psql.Arg(s.Latitude), + }}) + } + + if s.Longitude.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "longitude")...), + psql.Arg(s.Longitude), + }}) + } + + if s.Accuracy.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "accuracy")...), + psql.Arg(s.Accuracy), + }}) + } + + return exprs +} + +// FindDeviceLocation retrieves a single record by primary key +// If cols is empty Find will return all columns. 
+func FindDeviceLocation(ctx context.Context, exec bob.Executor, TimePK time.Time, DeviceIDPK string, cols ...string) (*DeviceLocation, error) { + if len(cols) == 0 { + return DeviceLocations.Query( + sm.Where(DeviceLocations.Columns.Time.EQ(psql.Arg(TimePK))), + sm.Where(DeviceLocations.Columns.DeviceID.EQ(psql.Arg(DeviceIDPK))), + ).One(ctx, exec) + } + + return DeviceLocations.Query( + sm.Where(DeviceLocations.Columns.Time.EQ(psql.Arg(TimePK))), + sm.Where(DeviceLocations.Columns.DeviceID.EQ(psql.Arg(DeviceIDPK))), + sm.Columns(DeviceLocations.Columns.Only(cols...)), + ).One(ctx, exec) +} + +// DeviceLocationExists checks the presence of a single record by primary key +func DeviceLocationExists(ctx context.Context, exec bob.Executor, TimePK time.Time, DeviceIDPK string) (bool, error) { + return DeviceLocations.Query( + sm.Where(DeviceLocations.Columns.Time.EQ(psql.Arg(TimePK))), + sm.Where(DeviceLocations.Columns.DeviceID.EQ(psql.Arg(DeviceIDPK))), + ).Exists(ctx, exec) +} + +// AfterQueryHook is called after DeviceLocation is retrieved from the database +func (o *DeviceLocation) AfterQueryHook(ctx context.Context, exec bob.Executor, queryType bob.QueryType) error { + var err error + + switch queryType { + case bob.QueryTypeSelect: + ctx, err = DeviceLocations.AfterSelectHooks.RunHooks(ctx, exec, DeviceLocationSlice{o}) + case bob.QueryTypeInsert: + ctx, err = DeviceLocations.AfterInsertHooks.RunHooks(ctx, exec, DeviceLocationSlice{o}) + case bob.QueryTypeUpdate: + ctx, err = DeviceLocations.AfterUpdateHooks.RunHooks(ctx, exec, DeviceLocationSlice{o}) + case bob.QueryTypeDelete: + ctx, err = DeviceLocations.AfterDeleteHooks.RunHooks(ctx, exec, DeviceLocationSlice{o}) + } + + return err +} + +// primaryKeyVals returns the primary key values of the DeviceLocation +func (o *DeviceLocation) primaryKeyVals() bob.Expression { + return psql.ArgGroup( + o.Time, + o.DeviceID, + ) +} + +func (o *DeviceLocation) pkEQ() dialect.Expression { + return 
psql.Group(psql.Quote("device_locations", "time"), psql.Quote("device_locations", "device_id")).EQ(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + return o.primaryKeyVals().WriteSQL(ctx, w, d, start) + })) +} + +// Update uses an executor to update the DeviceLocation +func (o *DeviceLocation) Update(ctx context.Context, exec bob.Executor, s *DeviceLocationSetter) error { + v, err := DeviceLocations.Update(s.UpdateMod(), um.Where(o.pkEQ())).One(ctx, exec) + if err != nil { + return err + } + + *o = *v + + return nil +} + +// Delete deletes a single DeviceLocation record with an executor +func (o *DeviceLocation) Delete(ctx context.Context, exec bob.Executor) error { + _, err := DeviceLocations.Delete(dm.Where(o.pkEQ())).Exec(ctx, exec) + return err +} + +// Reload refreshes the DeviceLocation using the executor +func (o *DeviceLocation) Reload(ctx context.Context, exec bob.Executor) error { + o2, err := DeviceLocations.Query( + sm.Where(DeviceLocations.Columns.Time.EQ(psql.Arg(o.Time))), + sm.Where(DeviceLocations.Columns.DeviceID.EQ(psql.Arg(o.DeviceID))), + ).One(ctx, exec) + if err != nil { + return err + } + + *o = *o2 + + return nil +} + +// AfterQueryHook is called after DeviceLocationSlice is retrieved from the database +func (o DeviceLocationSlice) AfterQueryHook(ctx context.Context, exec bob.Executor, queryType bob.QueryType) error { + var err error + + switch queryType { + case bob.QueryTypeSelect: + ctx, err = DeviceLocations.AfterSelectHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeInsert: + ctx, err = DeviceLocations.AfterInsertHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeUpdate: + ctx, err = DeviceLocations.AfterUpdateHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeDelete: + ctx, err = DeviceLocations.AfterDeleteHooks.RunHooks(ctx, exec, o) + } + + return err +} + +func (o DeviceLocationSlice) pkIN() dialect.Expression { + if len(o) == 0 { + return psql.Raw("NULL") + } + + return 
psql.Group(psql.Quote("device_locations", "time"), psql.Quote("device_locations", "device_id")).In(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + pkPairs := make([]bob.Expression, len(o)) + for i, row := range o { + pkPairs[i] = row.primaryKeyVals() + } + return bob.ExpressSlice(ctx, w, d, start, pkPairs, "", ", ", "") + })) +} + +// copyMatchingRows finds models in the given slice that have the same primary key +// then it first copies the existing relationships from the old model to the new model +// and then replaces the old model in the slice with the new model +func (o DeviceLocationSlice) copyMatchingRows(from ...*DeviceLocation) { + for i, old := range o { + for _, new := range from { + if !new.Time.Equal(old.Time) { + continue + } + if new.DeviceID != old.DeviceID { + continue + } + + o[i] = new + break + } + } +} + +// UpdateMod modifies an update query with "WHERE primary_key IN (o...)" +func (o DeviceLocationSlice) UpdateMod() bob.Mod[*dialect.UpdateQuery] { + return bob.ModFunc[*dialect.UpdateQuery](func(q *dialect.UpdateQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return DeviceLocations.BeforeUpdateHooks.RunHooks(ctx, exec, o) + }) + + q.AppendLoader(bob.LoaderFunc(func(ctx context.Context, exec bob.Executor, retrieved any) error { + var err error + switch retrieved := retrieved.(type) { + case *DeviceLocation: + o.copyMatchingRows(retrieved) + case []*DeviceLocation: + o.copyMatchingRows(retrieved...) + case DeviceLocationSlice: + o.copyMatchingRows(retrieved...) 
+ default: + // If the retrieved value is not a DeviceLocation or a slice of DeviceLocation + // then run the AfterUpdateHooks on the slice + _, err = DeviceLocations.AfterUpdateHooks.RunHooks(ctx, exec, o) + } + + return err + })) + + q.AppendWhere(o.pkIN()) + }) +} + +// DeleteMod modifies an delete query with "WHERE primary_key IN (o...)" +func (o DeviceLocationSlice) DeleteMod() bob.Mod[*dialect.DeleteQuery] { + return bob.ModFunc[*dialect.DeleteQuery](func(q *dialect.DeleteQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return DeviceLocations.BeforeDeleteHooks.RunHooks(ctx, exec, o) + }) + + q.AppendLoader(bob.LoaderFunc(func(ctx context.Context, exec bob.Executor, retrieved any) error { + var err error + switch retrieved := retrieved.(type) { + case *DeviceLocation: + o.copyMatchingRows(retrieved) + case []*DeviceLocation: + o.copyMatchingRows(retrieved...) + case DeviceLocationSlice: + o.copyMatchingRows(retrieved...) + default: + // If the retrieved value is not a DeviceLocation or a slice of DeviceLocation + // then run the AfterDeleteHooks on the slice + _, err = DeviceLocations.AfterDeleteHooks.RunHooks(ctx, exec, o) + } + + return err + })) + + q.AppendWhere(o.pkIN()) + }) +} + +func (o DeviceLocationSlice) UpdateAll(ctx context.Context, exec bob.Executor, vals DeviceLocationSetter) error { + if len(o) == 0 { + return nil + } + + _, err := DeviceLocations.Update(vals.UpdateMod(), o.UpdateMod()).All(ctx, exec) + return err +} + +func (o DeviceLocationSlice) DeleteAll(ctx context.Context, exec bob.Executor) error { + if len(o) == 0 { + return nil + } + + _, err := DeviceLocations.Delete(o.DeleteMod()).Exec(ctx, exec) + return err +} + +func (o DeviceLocationSlice) ReloadAll(ctx context.Context, exec bob.Executor) error { + if len(o) == 0 { + return nil + } + + o2, err := DeviceLocations.Query(sm.Where(o.pkIN())).All(ctx, exec) + if err != nil { + return err + } + + o.copyMatchingRows(o2...) 
+ + return nil +} + +type deviceLocationWhere[Q psql.Filterable] struct { + Time psql.WhereMod[Q, time.Time] + DeviceID psql.WhereMod[Q, string] + SpaceSlug psql.WhereMod[Q, string] + Latitude psql.WhereMod[Q, float64] + Longitude psql.WhereMod[Q, float64] + Accuracy psql.WhereMod[Q, float64] +} + +func (deviceLocationWhere[Q]) AliasedAs(alias string) deviceLocationWhere[Q] { + return buildDeviceLocationWhere[Q](buildDeviceLocationColumns(alias)) +} + +func buildDeviceLocationWhere[Q psql.Filterable](cols deviceLocationColumns) deviceLocationWhere[Q] { + return deviceLocationWhere[Q]{ + Time: psql.Where[Q, time.Time](cols.Time), + DeviceID: psql.Where[Q, string](cols.DeviceID), + SpaceSlug: psql.Where[Q, string](cols.SpaceSlug), + Latitude: psql.Where[Q, float64](cols.Latitude), + Longitude: psql.Where[Q, float64](cols.Longitude), + Accuracy: psql.Where[Q, float64](cols.Accuracy), + } +} diff --git a/pkgs/db/models/schema_migrations.bob.go b/pkgs/db/models/schema_migrations.bob.go new file mode 100644 index 0000000..ea6c75d --- /dev/null +++ b/pkgs/db/models/schema_migrations.bob.go @@ -0,0 +1,348 @@ +// Code generated by BobGen psql v0.41.1. DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package models + +import ( + "context" + "io" + + "github.com/aarondl/opt/omit" + "github.com/stephenafamo/bob" + "github.com/stephenafamo/bob/dialect/psql" + "github.com/stephenafamo/bob/dialect/psql/dialect" + "github.com/stephenafamo/bob/dialect/psql/dm" + "github.com/stephenafamo/bob/dialect/psql/sm" + "github.com/stephenafamo/bob/dialect/psql/um" + "github.com/stephenafamo/bob/expr" +) + +// SchemaMigration is an object representing the database table. +type SchemaMigration struct { + Version string `db:"version,pk" ` +} + +// SchemaMigrationSlice is an alias for a slice of pointers to SchemaMigration. +// This should almost always be used instead of []*SchemaMigration. 
+type SchemaMigrationSlice []*SchemaMigration + +// SchemaMigrations contains methods to work with the schema_migrations table +var SchemaMigrations = psql.NewTablex[*SchemaMigration, SchemaMigrationSlice, *SchemaMigrationSetter]("", "schema_migrations", buildSchemaMigrationColumns("schema_migrations")) + +// SchemaMigrationsQuery is a query on the schema_migrations table +type SchemaMigrationsQuery = *psql.ViewQuery[*SchemaMigration, SchemaMigrationSlice] + +func buildSchemaMigrationColumns(alias string) schemaMigrationColumns { + return schemaMigrationColumns{ + ColumnsExpr: expr.NewColumnsExpr( + "version", + ).WithParent("schema_migrations"), + tableAlias: alias, + Version: psql.Quote(alias, "version"), + } +} + +type schemaMigrationColumns struct { + expr.ColumnsExpr + tableAlias string + Version psql.Expression +} + +func (c schemaMigrationColumns) Alias() string { + return c.tableAlias +} + +func (schemaMigrationColumns) AliasedAs(alias string) schemaMigrationColumns { + return buildSchemaMigrationColumns(alias) +} + +// SchemaMigrationSetter is used for insert/upsert/update operations +// All values are optional, and do not have to be set +// Generated columns are not included +type SchemaMigrationSetter struct { + Version omit.Val[string] `db:"version,pk" ` +} + +func (s SchemaMigrationSetter) SetColumns() []string { + vals := make([]string, 0, 1) + if s.Version.IsValue() { + vals = append(vals, "version") + } + return vals +} + +func (s SchemaMigrationSetter) Overwrite(t *SchemaMigration) { + if s.Version.IsValue() { + t.Version = s.Version.MustGet() + } +} + +func (s *SchemaMigrationSetter) Apply(q *dialect.InsertQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return SchemaMigrations.BeforeInsertHooks.RunHooks(ctx, exec, s) + }) + + q.AppendValues(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + vals := make([]bob.Expression, 1) + if 
s.Version.IsValue() { + vals[0] = psql.Arg(s.Version.MustGet()) + } else { + vals[0] = psql.Raw("DEFAULT") + } + + return bob.ExpressSlice(ctx, w, d, start, vals, "", ", ", "") + })) +} + +func (s SchemaMigrationSetter) UpdateMod() bob.Mod[*dialect.UpdateQuery] { + return um.Set(s.Expressions()...) +} + +func (s SchemaMigrationSetter) Expressions(prefix ...string) []bob.Expression { + exprs := make([]bob.Expression, 0, 1) + + if s.Version.IsValue() { + exprs = append(exprs, expr.Join{Sep: " = ", Exprs: []bob.Expression{ + psql.Quote(append(prefix, "version")...), + psql.Arg(s.Version), + }}) + } + + return exprs +} + +// FindSchemaMigration retrieves a single record by primary key +// If cols is empty Find will return all columns. +func FindSchemaMigration(ctx context.Context, exec bob.Executor, VersionPK string, cols ...string) (*SchemaMigration, error) { + if len(cols) == 0 { + return SchemaMigrations.Query( + sm.Where(SchemaMigrations.Columns.Version.EQ(psql.Arg(VersionPK))), + ).One(ctx, exec) + } + + return SchemaMigrations.Query( + sm.Where(SchemaMigrations.Columns.Version.EQ(psql.Arg(VersionPK))), + sm.Columns(SchemaMigrations.Columns.Only(cols...)), + ).One(ctx, exec) +} + +// SchemaMigrationExists checks the presence of a single record by primary key +func SchemaMigrationExists(ctx context.Context, exec bob.Executor, VersionPK string) (bool, error) { + return SchemaMigrations.Query( + sm.Where(SchemaMigrations.Columns.Version.EQ(psql.Arg(VersionPK))), + ).Exists(ctx, exec) +} + +// AfterQueryHook is called after SchemaMigration is retrieved from the database +func (o *SchemaMigration) AfterQueryHook(ctx context.Context, exec bob.Executor, queryType bob.QueryType) error { + var err error + + switch queryType { + case bob.QueryTypeSelect: + ctx, err = SchemaMigrations.AfterSelectHooks.RunHooks(ctx, exec, SchemaMigrationSlice{o}) + case bob.QueryTypeInsert: + ctx, err = SchemaMigrations.AfterInsertHooks.RunHooks(ctx, exec, SchemaMigrationSlice{o}) + case 
bob.QueryTypeUpdate: + ctx, err = SchemaMigrations.AfterUpdateHooks.RunHooks(ctx, exec, SchemaMigrationSlice{o}) + case bob.QueryTypeDelete: + ctx, err = SchemaMigrations.AfterDeleteHooks.RunHooks(ctx, exec, SchemaMigrationSlice{o}) + } + + return err +} + +// primaryKeyVals returns the primary key values of the SchemaMigration +func (o *SchemaMigration) primaryKeyVals() bob.Expression { + return psql.Arg(o.Version) +} + +func (o *SchemaMigration) pkEQ() dialect.Expression { + return psql.Quote("schema_migrations", "version").EQ(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + return o.primaryKeyVals().WriteSQL(ctx, w, d, start) + })) +} + +// Update uses an executor to update the SchemaMigration +func (o *SchemaMigration) Update(ctx context.Context, exec bob.Executor, s *SchemaMigrationSetter) error { + v, err := SchemaMigrations.Update(s.UpdateMod(), um.Where(o.pkEQ())).One(ctx, exec) + if err != nil { + return err + } + + *o = *v + + return nil +} + +// Delete deletes a single SchemaMigration record with an executor +func (o *SchemaMigration) Delete(ctx context.Context, exec bob.Executor) error { + _, err := SchemaMigrations.Delete(dm.Where(o.pkEQ())).Exec(ctx, exec) + return err +} + +// Reload refreshes the SchemaMigration using the executor +func (o *SchemaMigration) Reload(ctx context.Context, exec bob.Executor) error { + o2, err := SchemaMigrations.Query( + sm.Where(SchemaMigrations.Columns.Version.EQ(psql.Arg(o.Version))), + ).One(ctx, exec) + if err != nil { + return err + } + + *o = *o2 + + return nil +} + +// AfterQueryHook is called after SchemaMigrationSlice is retrieved from the database +func (o SchemaMigrationSlice) AfterQueryHook(ctx context.Context, exec bob.Executor, queryType bob.QueryType) error { + var err error + + switch queryType { + case bob.QueryTypeSelect: + ctx, err = SchemaMigrations.AfterSelectHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeInsert: + ctx, err = 
SchemaMigrations.AfterInsertHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeUpdate: + ctx, err = SchemaMigrations.AfterUpdateHooks.RunHooks(ctx, exec, o) + case bob.QueryTypeDelete: + ctx, err = SchemaMigrations.AfterDeleteHooks.RunHooks(ctx, exec, o) + } + + return err +} + +func (o SchemaMigrationSlice) pkIN() dialect.Expression { + if len(o) == 0 { + return psql.Raw("NULL") + } + + return psql.Quote("schema_migrations", "version").In(bob.ExpressionFunc(func(ctx context.Context, w io.Writer, d bob.Dialect, start int) ([]any, error) { + pkPairs := make([]bob.Expression, len(o)) + for i, row := range o { + pkPairs[i] = row.primaryKeyVals() + } + return bob.ExpressSlice(ctx, w, d, start, pkPairs, "", ", ", "") + })) +} + +// copyMatchingRows finds models in the given slice that have the same primary key +// then it first copies the existing relationships from the old model to the new model +// and then replaces the old model in the slice with the new model +func (o SchemaMigrationSlice) copyMatchingRows(from ...*SchemaMigration) { + for i, old := range o { + for _, new := range from { + if new.Version != old.Version { + continue + } + + o[i] = new + break + } + } +} + +// UpdateMod modifies an update query with "WHERE primary_key IN (o...)" +func (o SchemaMigrationSlice) UpdateMod() bob.Mod[*dialect.UpdateQuery] { + return bob.ModFunc[*dialect.UpdateQuery](func(q *dialect.UpdateQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return SchemaMigrations.BeforeUpdateHooks.RunHooks(ctx, exec, o) + }) + + q.AppendLoader(bob.LoaderFunc(func(ctx context.Context, exec bob.Executor, retrieved any) error { + var err error + switch retrieved := retrieved.(type) { + case *SchemaMigration: + o.copyMatchingRows(retrieved) + case []*SchemaMigration: + o.copyMatchingRows(retrieved...) + case SchemaMigrationSlice: + o.copyMatchingRows(retrieved...) 
+ default: + // If the retrieved value is not a SchemaMigration or a slice of SchemaMigration + // then run the AfterUpdateHooks on the slice + _, err = SchemaMigrations.AfterUpdateHooks.RunHooks(ctx, exec, o) + } + + return err + })) + + q.AppendWhere(o.pkIN()) + }) +} + +// DeleteMod modifies an delete query with "WHERE primary_key IN (o...)" +func (o SchemaMigrationSlice) DeleteMod() bob.Mod[*dialect.DeleteQuery] { + return bob.ModFunc[*dialect.DeleteQuery](func(q *dialect.DeleteQuery) { + q.AppendHooks(func(ctx context.Context, exec bob.Executor) (context.Context, error) { + return SchemaMigrations.BeforeDeleteHooks.RunHooks(ctx, exec, o) + }) + + q.AppendLoader(bob.LoaderFunc(func(ctx context.Context, exec bob.Executor, retrieved any) error { + var err error + switch retrieved := retrieved.(type) { + case *SchemaMigration: + o.copyMatchingRows(retrieved) + case []*SchemaMigration: + o.copyMatchingRows(retrieved...) + case SchemaMigrationSlice: + o.copyMatchingRows(retrieved...) + default: + // If the retrieved value is not a SchemaMigration or a slice of SchemaMigration + // then run the AfterDeleteHooks on the slice + _, err = SchemaMigrations.AfterDeleteHooks.RunHooks(ctx, exec, o) + } + + return err + })) + + q.AppendWhere(o.pkIN()) + }) +} + +func (o SchemaMigrationSlice) UpdateAll(ctx context.Context, exec bob.Executor, vals SchemaMigrationSetter) error { + if len(o) == 0 { + return nil + } + + _, err := SchemaMigrations.Update(vals.UpdateMod(), o.UpdateMod()).All(ctx, exec) + return err +} + +func (o SchemaMigrationSlice) DeleteAll(ctx context.Context, exec bob.Executor) error { + if len(o) == 0 { + return nil + } + + _, err := SchemaMigrations.Delete(o.DeleteMod()).Exec(ctx, exec) + return err +} + +func (o SchemaMigrationSlice) ReloadAll(ctx context.Context, exec bob.Executor) error { + if len(o) == 0 { + return nil + } + + o2, err := SchemaMigrations.Query(sm.Where(o.pkIN())).All(ctx, exec) + if err != nil { + return err + } + + 
o.copyMatchingRows(o2...) + + return nil +} + +type schemaMigrationWhere[Q psql.Filterable] struct { + Version psql.WhereMod[Q, string] +} + +func (schemaMigrationWhere[Q]) AliasedAs(alias string) schemaMigrationWhere[Q] { + return buildSchemaMigrationWhere[Q](buildSchemaMigrationColumns(alias)) +} + +func buildSchemaMigrationWhere[Q psql.Filterable](cols schemaMigrationColumns) schemaMigrationWhere[Q] { + return schemaMigrationWhere[Q]{ + Version: psql.Where[Q, string](cols.Version), + } +} From 552b41bb5c7693e4407b5de13e5489a587b62f3c Mon Sep 17 00:00:00 2001 From: ngovinh2k2 Date: Tue, 13 Jan 2026 14:45:16 +0700 Subject: [PATCH 02/10] feat: add license apache 2.0 to source --- CLA.md | 67 ++++++++++++++ LICENSE | 202 +++++++++++++++++++++++++++++++++++++++++ README.md | 4 + cmd/telemetry/main.go | 16 ++++ cmd/telemetry/serve.go | 16 ++++ 5 files changed, 305 insertions(+) create mode 100644 CLA.md create mode 100644 LICENSE diff --git a/CLA.md b/CLA.md new file mode 100644 index 0000000..093c6b7 --- /dev/null +++ b/CLA.md @@ -0,0 +1,67 @@ +# Contributor License Agreement (CLA) + +**Version 1.0** + +This Contributor License Agreement (“**Agreement**”) is entered into by **you** (“**Contributor**”) and **Digital Fortress** (“**Company**”) regarding your contributions to the **SpaceDF** project (“**Project**”). + +By submitting any Contribution to the Project, you agree to the following terms: + +## 1. Definitions + +- **“Contribution”** means any source code, documentation, design, or other material submitted by you to the Project. +- **“Submit”** means any form of electronic, written, or verbal communication intended to be included in the Project, including but not limited to pull requests, patches, issues, or comments. + +## 2. Copyright Ownership + +- You retain ownership of the copyright in your Contributions. +- Nothing in this Agreement transfers ownership of your intellectual property to the Company. + +## 3. 
License Grant + +You grant **Digital Fortress** a **perpetual, worldwide, non-exclusive, royalty-free, and irrevocable license** to: + +- Use +- Modify +- Distribute +- Re-license +- Sublicense +- Commercialize + +your Contributions as part of the Project or in any related products or services. + +This includes, but is not limited to, use in **proprietary**, **SaaS**, and **enterprise** offerings. + +## 4. Patent Grant + +You grant Digital Fortress a **perpetual, worldwide, royalty-free license** to any patent claims you own that are necessarily infringed by your Contributions. + +## 5. Representations + +You represent and warrant that: + +- You have the legal right to submit the Contributions. +- The Contributions do not violate or infringe upon any third-party rights. +- If your employer or organization has intellectual property policies, you have obtained all necessary permissions to make the Contributions. + +## 6. No Obligation + +The Company is **not obligated** to: + +- Accept your Contributions. +- Provide any form of compensation. +- Include your Contributions in any release or distribution. + +## 7. Public Attribution + +The Company **may**, but is not required to, publicly acknowledge or attribute your Contributions. + +## 8. License Compatibility + +- Your Contributions will be licensed to users under the Project’s open-source license (e.g., **Apache License 2.0**). +- This Agreement governs only the relationship between you and the Company and does not modify the Project’s open-source license. + +## 9. Governing Law + +This Agreement shall be governed by and construed in accordance with the laws of **Vietnam**. + +By submitting a Contribution, you confirm that you have read, understood, and agree to the terms of this Agreement. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..10398cd --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2026 Digital Fortress + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/README.md b/README.md index 788667c..e10e0ff 100644 --- a/README.md +++ b/README.md @@ -214,4 +214,8 @@ dbmate down 4. Add tests 5. Submit a pull request +## License +Licensed under the Apache License, Version 2.0 +See the LICENSE file for details. + [![SpaceDF - A project from Digital Fortress](https://df.technology/images/SpaceDF.png)](https://df.technology/) \ No newline at end of file diff --git a/cmd/telemetry/main.go b/cmd/telemetry/main.go index 75b9704..9aba600 100644 --- a/cmd/telemetry/main.go +++ b/cmd/telemetry/main.go @@ -1,3 +1,19 @@ +/* +Copyright 2026 Digital Fortress. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import ( diff --git a/cmd/telemetry/serve.go b/cmd/telemetry/serve.go index ae17b7d..d321324 100644 --- a/cmd/telemetry/serve.go +++ b/cmd/telemetry/serve.go @@ -1,3 +1,19 @@ +/* +Copyright 2026 Digital Fortress. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package main import ( From ab80f8f16832053ea88720d627db237b7b1250d4 Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Thu, 22 Jan 2026 15:05:29 +0700 Subject: [PATCH 03/10] feat: implement events feature --- .github/workflows/cd-build-image.yml | 52 ++ configs/config.yaml | 23 +- configs/event_rules/rak4630.yaml | 71 +++ internal/api/entities/handler.go | 54 ++ internal/api/entities/router.go | 3 + internal/api/events/handler.go | 212 ++++++++ internal/api/events/router.go | 22 + internal/config/config.go | 1 + internal/config/event_rules.go | 208 ++++++++ internal/models/events.go | 253 +++++++++ internal/timescaledb/events.go | 486 ++++++++++++++++++ internal/timescaledb/telemetry.go | 33 ++ .../20251225000000_create_events_schema.sql | 91 ++++ 13 files changed, 1498 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/cd-build-image.yml create mode 100644 configs/event_rules/rak4630.yaml create mode 100644 internal/api/events/handler.go create mode 100644 internal/api/events/router.go create mode 100644 internal/config/event_rules.go create mode 100644 internal/models/events.go create mode 100644 internal/timescaledb/events.go create mode 100644 pkgs/db/migrations/20251225000000_create_events_schema.sql diff --git a/.github/workflows/cd-build-image.yml b/.github/workflows/cd-build-image.yml new file mode 100644 index 0000000..f29764a --- /dev/null +++ b/.github/workflows/cd-build-image.yml @@ -0,0 +1,52 @@ +name: Build & Publish Docker Image + +on: + release: + types: [published] + +jobs: + deploy: + if: github.event.release.target_commitish == 'main' + name: Build & Deploy spacedf-backend Docker Image + runs-on: ubuntu-latest + + permissions: + contents: read + packages: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.event.release.tag_name }} + + # Login to GHCR + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: 
${{ secrets.GITHUB_TOKEN }} + + # Generate Docker metadata + - name: Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }} + tags: | + type=semver,pattern={{version}},value=${{ github.event.release.tag_name }} + type=semver,pattern={{major}}.{{minor}},value=${{ github.event.release.tag_name }} + type=semver,pattern={{major}},value=${{ github.event.release.tag_name }} + + # Build & Push image + - name: Build & Push Docker image + uses: docker/build-push-action@v5 + with: + context: . + file: ./Dockerfile + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + secrets: | + GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }} diff --git a/configs/config.yaml b/configs/config.yaml index cbb8152..093d69f 100644 --- a/configs/config.yaml +++ b/configs/config.yaml @@ -1,22 +1,23 @@ # Server Configuration server: - log_level: "info" # debug, info, warn, error - api_port: 8080 # API server port (includes health endpoints) + log_level: "info" # debug, info, warn, error + api_port: 8080 # API server port (includes health endpoints) alerts_processors_path: "configs/alerts_processors.yaml" + event_rules_dir: "configs/event_rules" # Directory containing device model event rules # AMQP/RabbitMQ Configuration amqp: - broker_url: "" # Set via environment variable AMQP_BROKER_URL + broker_url: "" # Set via environment variable AMQP_BROKER_URL consumer_tag: "telemetry-service" prefetch_count: 100 - allowed_vhosts: [] # Empty means process all vhosts + allowed_vhosts: [] # Empty means process all vhosts reconnect_delay: "5s" # Organization Events Configuration org_events: - exchange: "org.events" # Topic exchange for org events - queue: "telemetry.org.events.queue" # Telemetry's queue for org events - routing_key: "org.#" # Listen to all org events + exchange: "org.events" # Topic exchange for org events + queue: "telemetry.org.events.queue" # Telemetry's queue for org events + routing_key: "org.#" 
# Listen to all org events consumer_tag: "telemetry-org-events" # TimescaleDB Configuration @@ -26,10 +27,10 @@ db: password: "postgres" host: "localhost" port: 5437 - batch_size: 1000 # Number of locations to batch before writing - flush_interval: "1s" # Maximum time between batch writes - max_connections: 25 # Maximum number of open database connections - max_idle_conns: 5 # Maximum number of idle connections in the pool + batch_size: 1000 # Number of locations to batch before writing + flush_interval: "1s" # Maximum time between batch writes + max_connections: 25 # Maximum number of open database connections + max_idle_conns: 5 # Maximum number of idle connections in the pool psql: dsn: "postgres://postgres:postgres@localhost:5437/spacedf_telemetry?sslmode=disable" diff --git a/configs/event_rules/rak4630.yaml b/configs/event_rules/rak4630.yaml new file mode 100644 index 0000000..f5ca86a --- /dev/null +++ b/configs/event_rules/rak4630.yaml @@ -0,0 +1,71 @@ +# RAK4630 Event Rules +device_model: "rak4630" +device_model_id: "" # Will be resolved from device service +display_name: "RAK4630 WisBlock" + +rules: + # Battery Low Warning + - rule_key: "battery_v" + entity_id_pattern: "" + operator: "lt" + operand: "3.3" + event_type: "device_event" + event_level: "system" + description: "Battery voltage is low (< 3.3V)" + status: "active" + is_active: true + + # Battery Critical Warning + - rule_key: "battery_v" + entity_id_pattern: "" + operator: "lt" + operand: "3.0" + event_type: "device_event" + event_level: "system" + description: "Battery voltage is critically low (< 3.0V)" + status: "active" + is_active: true + + # High Temperature Warning + - rule_key: "temperature" + entity_id_pattern: "" + operator: "gt" + operand: "50" + event_type: "device_event" + event_level: "system" + description: "Temperature is high (> 50°C)" + status: "active" + is_active: true + + # Low Temperature Warning + - rule_key: "temperature" + entity_id_pattern: "" + operator: "lt" + operand: 
"-10" + event_type: "device_event" + event_level: "system" + description: "Temperature is very low (< -10°C)" + status: "active" + is_active: true + + # High Humidity Warning + - rule_key: "humidity" + entity_id_pattern: "" + operator: "gt" + operand: "90" + event_type: "device_event" + event_level: "system" + description: "Humidity is very high (> 90%)" + status: "active" + is_active: true + + # Low Humidity Warning + - rule_key: "humidity" + entity_id_pattern: "" + operator: "lt" + operand: "10" + event_type: "device_event" + event_level: "system" + description: "Humidity is very low (< 10%)" + status: "active" + is_active: true diff --git a/internal/api/entities/handler.go b/internal/api/entities/handler.go index cc4af1a..05fd42d 100644 --- a/internal/api/entities/handler.go +++ b/internal/api/entities/handler.go @@ -11,6 +11,11 @@ import ( "go.uber.org/zap" ) +// updateDeviceTriggerEventRequest represents the request to update an device's trigger event type +type updateDeviceTriggerEventRequest struct { + TriggerEventType string `json:"trigger_event_type"` +} + func getEntities(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { return func(c echo.Context) error { // Parse query params @@ -87,3 +92,52 @@ func parseDisplayTypes(param string) []string { } return parts[:j] } + +// updateDeviceTriggerEvent updates the trigger event type for an device +func updateDeviceTriggerEvent(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + deviceID := strings.TrimSpace(c.Param("device_id")) + if deviceID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "device_id is required", + }) + } + + var req updateDeviceTriggerEventRequest + if err := c.Bind(&req); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "invalid request body", + }) + } + + if req.TriggerEventType == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": 
"trigger_event_type is required", + }) + } + + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + err := tsClient.UpdateDeviceTriggerEventType(ctx, deviceID, req.TriggerEventType) + if err != nil { + logger.Error("failed to update device trigger event type", + zap.String("device_id", deviceID), + zap.String("trigger_event_type", req.TriggerEventType), + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to update device trigger event type", + }) + } + + return c.JSON(http.StatusOK, map[string]interface{}{ + "device_id": deviceID, + "trigger_event_type": req.TriggerEventType, + }) + } +} diff --git a/internal/api/entities/router.go b/internal/api/entities/router.go index dea8f01..a51b442 100644 --- a/internal/api/entities/router.go +++ b/internal/api/entities/router.go @@ -9,4 +9,7 @@ import ( func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { group := e.Group("/entities") group.GET("", getEntities(logger, tsClient)) + + // Update device trigger event configuration + group.PUT("/:device_id/trigger-event", updateDeviceTriggerEvent(logger, tsClient)) } diff --git a/internal/api/events/handler.go b/internal/api/events/handler.go new file mode 100644 index 0000000..3e7e8ed --- /dev/null +++ b/internal/api/events/handler.go @@ -0,0 +1,212 @@ +package events + +import ( + "net/http" + "strconv" + "strings" + + "github.com/Space-DF/telemetry-service/internal/api/common" + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +// ============================================================================ +// Events API Handlers +// 
============================================================================ + +// getEventsByDevice returns all events for a specific device +func getEventsByDevice(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + deviceID := strings.TrimSpace(c.Param("device_id")) + if deviceID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "device_id is required", + }) + } + + limit := 100 + if limitStr := c.QueryParam("limit"); limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 { + limit = l + } + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + events, err := tsClient.GetEventsByDevice(ctx, orgToUse, deviceID, limit) + if err != nil { + logger.Error("failed to get events by device", + zap.String("device_id", deviceID), + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to get events", + }) + } + + return c.JSON(http.StatusOK, map[string]interface{}{ + "device_id": deviceID, + "events": events, + "count": len(events), + }) + } +} + +// ============================================================================ +// Event Rules API Handlers +// ============================================================================ + +// getEventRules returns all event rules +func getEventRules(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + page := 1 + if pageStr := c.QueryParam("page"); pageStr != "" 
{ + if p, err := strconv.Atoi(pageStr); err == nil && p > 0 { + page = p + } + } + + pageSize := 20 + if sizeStr := c.QueryParam("page_size"); sizeStr != "" { + if s, err := strconv.Atoi(sizeStr); err == nil && s > 0 && s <= 100 { + pageSize = s + } + } + + deviceID := c.QueryParam("device_id") + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + rules, total, err := tsClient.GetEventRules(ctx, deviceID, page, pageSize) + if err != nil { + logger.Error("failed to get event rules", + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to get event rules", + }) + } + + return c.JSON(http.StatusOK, models.EventRulesListResponse{ + Rules: rules, + TotalCount: total, + Page: page, + PageSize: pageSize, + }) + } +} + +// createEventRule creates a new event rule +func createEventRule(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + var req models.EventRuleRequest + if err := c.Bind(&req); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "invalid request body", + }) + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + rule, err := tsClient.CreateEventRule(ctx, &req) + if err != nil { + logger.Error("failed to create event rule", + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to create event rule", + }) + } + + return c.JSON(http.StatusCreated, rule) + } +} + +// updateEventRule updates an existing event rule +func updateEventRule(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return 
c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + ruleID := strings.TrimSpace(c.Param("rule_id")) + if ruleID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "rule_id is required", + }) + } + + var req models.EventRuleRequest + if err := c.Bind(&req); err != nil { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "invalid request body", + }) + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + rule, err := tsClient.UpdateEventRule(ctx, ruleID, &req) + if err != nil { + logger.Error("failed to update event rule", + zap.String("rule_id", ruleID), + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to update event rule", + }) + } + + return c.JSON(http.StatusOK, rule) + } +} + +// deleteEventRule deletes an event rule +func deleteEventRule(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { + return func(c echo.Context) error { + orgToUse := common.ResolveOrgFromRequest(c) + if orgToUse == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "Could not determine organization from hostname or X-Organization header", + }) + } + + ruleID := strings.TrimSpace(c.Param("rule_id")) + if ruleID == "" { + return c.JSON(http.StatusBadRequest, map[string]string{ + "error": "rule_id is required", + }) + } + + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) + if err := tsClient.DeleteEventRule(ctx, ruleID); err != nil { + logger.Error("failed to delete event rule", + zap.String("rule_id", ruleID), + zap.Error(err)) + return c.JSON(http.StatusInternalServerError, map[string]string{ + "error": "failed to delete event rule", + }) + } + + return c.JSON(http.StatusOK, map[string]string{ + "message": "event rule deleted successfully", + }) + } +} \ No newline at end of file diff --git 
a/internal/api/events/router.go b/internal/api/events/router.go new file mode 100644 index 0000000..d33b4d1 --- /dev/null +++ b/internal/api/events/router.go @@ -0,0 +1,22 @@ +package events + +import ( + "github.com/Space-DF/telemetry-service/internal/config" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "github.com/labstack/echo/v4" + "go.uber.org/zap" +) + +func RegisterRoutes(e *echo.Group, cfg *config.Config, logger *zap.Logger, tsClient *timescaledb.Client) { + // Get events for a specific device + e.GET("/events/device/:device_id", getEventsByDevice(logger, tsClient)) + + // Get event rules + e.GET("/event-rules", getEventRules(logger, tsClient)) + // Create a new event rule + e.POST("/event-rules", createEventRule(logger, tsClient)) + // Update an event rule + e.PUT("/event-rules/:rule_id", updateEventRule(logger, tsClient)) + // Delete an event rule + e.DELETE("/event-rules/:rule_id", deleteEventRule(logger, tsClient)) +} diff --git a/internal/config/config.go b/internal/config/config.go index 4850eda..d1aa427 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -32,6 +32,7 @@ type Server struct { LogLevel string `mapstructure:"log_level"` APIPort int `mapstructure:"api_port"` AlertsProcessorsCfg string `mapstructure:"alerts_processors_path"` + EventRulesDir string `mapstructure:"event_rules_dir"` } // AMQP contains RabbitMQ configuration diff --git a/internal/config/event_rules.go b/internal/config/event_rules.go new file mode 100644 index 0000000..0530748 --- /dev/null +++ b/internal/config/event_rules.go @@ -0,0 +1,208 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +// EventRuleConfig represents a single event rule configuration +type EventRuleConfig struct { + RuleKey string `yaml:"rule_key"` + EntityIDPattern string `yaml:"entity_id_pattern"` + Operator string `yaml:"operator"` + Operand string `yaml:"operand"` + EventType string `yaml:"event_type"` + 
EventLevel string `yaml:"event_level"` + Description string `yaml:"description"` + Status string `yaml:"status"` + IsActive bool `yaml:"is_active"` +} + +// DeviceModelRules represents event rules for a specific device model +type DeviceModelRules struct { + DeviceModel string `yaml:"device_model"` + DeviceModelID string `yaml:"device_model_id"` + DisplayName string `yaml:"display_name"` + Rules []EventRuleConfig `yaml:"rules"` +} + +// EventRulesConfig represents the aggregated event rules configuration +type EventRulesConfig struct { + DeviceModels []DeviceModelRules `yaml:"device_models"` +} + +// LoadEventRulesConfig loads event rules from a YAML file +func LoadEventRulesConfig(path string) (*EventRulesConfig, error) { + if path == "" { + return nil, fmt.Errorf("event rules config path is empty") + } + + // Validate the path is within allowed directories (security: prevent path traversal) + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("invalid path: %w", err) + } + absPath = filepath.Clean(absPath) + + if !isPathAllowed(absPath) { + return nil, fmt.Errorf("path traversal detected: file must be within configs directory: %s", path) + } + + data, err := os.ReadFile(absPath) + if err != nil { + return nil, fmt.Errorf("failed to read event rules config file: %w", err) + } + + var cfg EventRulesConfig + if err := yaml.Unmarshal(data, &cfg); err != nil { + return nil, fmt.Errorf("failed to parse event rules config: %w", err) + } + + return &cfg, nil +} + +// LoadEventRulesFromDir loads all event rule YAML files from a directory +// Each file should contain a single device model's rules +func LoadEventRulesFromDir(dir string) (*EventRulesConfig, error) { + if dir == "" { + return nil, fmt.Errorf("event rules directory is empty") + } + + entries, err := os.ReadDir(dir) + if err != nil { + return nil, fmt.Errorf("failed to read event rules directory: %w", err) + } + + var cfg EventRulesConfig + + for _, entry := range entries { + // Skip 
directories and non-YAML files + if entry.IsDir() { + continue + } + + // Check for .yaml or .yml extension + name := entry.Name() + ext := filepath.Ext(name) + if ext != ".yaml" && ext != ".yml" { + continue + } + + // Load device model rules from file + path := filepath.Join(dir, name) + dmRules, err := loadDeviceModelRules(path) + if err != nil { + // Log warning but continue loading other files + fmt.Printf("Warning: failed to load %s: %v\n", name, err) + continue + } + + cfg.DeviceModels = append(cfg.DeviceModels, *dmRules) + } + + if len(cfg.DeviceModels) == 0 { + return nil, fmt.Errorf("no valid event rules found in directory: %s", dir) + } + + return &cfg, nil +} + +// loadDeviceModelRules loads a single device model's rules from a YAML file +func loadDeviceModelRules(path string) (*DeviceModelRules, error) { + // Validate the path is within allowed directories (security: prevent path traversal) + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("invalid path: %w", err) + } + absPath = filepath.Clean(absPath) + + // Check if file is within an allowed directory (configs/event_rules or configs) + // This prevents directory traversal attacks + if !isPathAllowed(absPath) { + return nil, fmt.Errorf("path traversal detected: file must be within configs directory: %s", path) + } + + data, err := os.ReadFile(absPath) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + var dm DeviceModelRules + if err := yaml.Unmarshal(data, &dm); err != nil { + return nil, fmt.Errorf("failed to parse device model rules: %w", err) + } + + return &dm, nil +} + +// isPathAllowed checks if a path is within the configs directory +func isPathAllowed(path string) bool { + absPath := filepath.Clean(path) + // Check for common allowed prefixes + allowedPrefixes := []string{ + "configs/event_rules", + "configs" + string(filepath.Separator) + "event_rules", + } + + for _, prefix := range allowedPrefixes { + allowedPath, _ := 
filepath.Abs(prefix) + if strings.HasPrefix(absPath, allowedPath) { + return true + } + } + return false +} + +// GetRulesForDeviceModel returns all rules for a specific device model +func (c *EventRulesConfig) GetRulesForDeviceModel(deviceModel string) []EventRuleConfig { + for _, dm := range c.DeviceModels { + if dm.DeviceModel == deviceModel { + return dm.Rules + } + } + return nil +} + +// GetDeviceModelRules returns the DeviceModelRules for a specific device model +func (c *EventRulesConfig) GetDeviceModelRules(deviceModel string) *DeviceModelRules { + for i := range c.DeviceModels { + if c.DeviceModels[i].DeviceModel == deviceModel { + return &c.DeviceModels[i] + } + } + return nil +} + +// ToRawMap converts the EventRulesConfig to a map[string]interface{} for use with SeedDefaultEventRules +func (c *EventRulesConfig) ToRawMap() map[string]interface{} { + deviceModels := make([]interface{}, 0, len(c.DeviceModels)) + for _, dm := range c.DeviceModels { + rules := make([]interface{}, 0, len(dm.Rules)) + for _, r := range dm.Rules { + rules = append(rules, map[string]interface{}{ + "rule_key": r.RuleKey, + "entity_id_pattern": r.EntityIDPattern, + "operator": r.Operator, + "operand": r.Operand, + "event_type": r.EventType, + "event_level": r.EventLevel, + "description": r.Description, + "status": r.Status, + "is_active": r.IsActive, + }) + } + deviceModels = append(deviceModels, map[string]interface{}{ + "device_model": dm.DeviceModel, + "device_model_id": dm.DeviceModelID, + "display_name": dm.DisplayName, + "rules": rules, + }) + } + return map[string]interface{}{ + "device_models": deviceModels, + } +} diff --git a/internal/models/events.go b/internal/models/events.go new file mode 100644 index 0000000..fc0a19c --- /dev/null +++ b/internal/models/events.go @@ -0,0 +1,253 @@ +package models + +import ( + "encoding/json" + "time" +) + +// EventType represents a type of event (e.g., "state_changed", "service_call") +type EventType struct { + EventTypeID int 
`json:"event_type_id" db:"event_type_id"` + EventType string `json:"event_type" db:"event_type"` + CreatedAt time.Time `json:"created_at" db:"created_at"` +} + +// EventData represents shared event data (deduplicated by hash) +type EventData struct { + DataID int64 `json:"data_id" db:"data_id"` + Hash int64 `json:"hash" db:"hash"` + SharedData json.RawMessage `json:"shared_data" db:"shared_data"` + CreatedAt time.Time `json:"created_at" db:"created_at"` +} + +// EventRule represents a rule for triggering events based on conditions +type EventRule struct { + EventRuleID string `json:"event_rule_id" db:"event_rule_id"` + EntityID *string `json:"entity_id,omitempty" db:"entity_id"` + DeviceModelID *string `json:"device_model_id,omitempty" db:"device_model_id"` + RuleKey *string `json:"rule_key,omitempty" db:"rule_key"` // e.g., 'battery_low', 'temperature_low' + Operator *string `json:"operator,omitempty" db:"operator"` // eq, ne, gt, lt, gte, lte, contains + Operand string `json:"operand" db:"operand"` + Status *string `json:"status,omitempty" db:"status"` // active, inactive, paused + IsActive *bool `json:"is_active,omitempty" db:"is_active"` + StartTime *time.Time `json:"start_time,omitempty" db:"start_time"` + EndTime *time.Time `json:"end_time,omitempty" db:"end_time"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + UpdatedAt time.Time `json:"updated_at" db:"updated_at"` +} + +// Event represents an event occurrence +type Event struct { + EventID int64 `json:"event_id" db:"event_id"` + EventTypeID int `json:"event_type_id" db:"event_type_id"` + DataID *int64 `json:"data_id,omitempty" db:"data_id"` + EventLevel *string `json:"event_level,omitempty" db:"event_level"` // manufacturer, system, user + EventRuleID *string `json:"event_rule_id,omitempty" db:"event_rule_id"` + SpaceSlug string `json:"space_slug,omitempty" db:"space_slug"` + EntityID *string `json:"entity_id,omitempty" db:"entity_id"` + StateID *int64 `json:"state_id,omitempty" db:"state_id"` 
+ ContextID []byte `json:"context_id_bin,omitempty" db:"context_id_bin"` + TriggerID *string `json:"trigger_id,omitempty" db:"trigger_id"` + AllowNewEvent *bool `json:"allow_new_event,omitempty" db:"allow_new_event"` + TimeFiredTs int64 `json:"time_fired_ts" db:"time_fired_ts"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + + // Joined fields + EventType string `json:"event_type,omitempty" db:"event_type"` + SharedData json.RawMessage `json:"shared_data,omitempty" db:"shared_data"` +} + +// StatesMeta represents metadata about entity states +type StatesMeta struct { + MetadataID int `json:"metadata_id" db:"metadata_id"` + EntityID string `json:"entity_id" db:"entity_id"` + CreatedAt time.Time `json:"created_at" db:"created_at"` +} + +// StateAttributes represents shared state attributes (deduplicated by hash) +type StateAttributes struct { + AttributesID int `json:"attributes_id" db:"attributes_id"` + Hash int64 `json:"hash" db:"hash"` + SharedAttrs json.RawMessage `json:"shared_attrs" db:"shared_attrs"` + CreatedAt time.Time `json:"created_at" db:"created_at"` +} + +// State represents an entity state with event linkage +type State struct { + StateID int64 `json:"state_id" db:"state_id"` + MetadataID int `json:"metadata_id" db:"metadata_id"` + State string `json:"state" db:"state"` + AttributesID *int `json:"attributes_id,omitempty" db:"attributes_id"` + EventID *int64 `json:"event_id,omitempty" db:"event_id"` + LastChangedTs int64 `json:"last_changed_ts" db:"last_changed_ts"` + LastUpdatedTs int64 `json:"last_updated_ts" db:"last_updated_ts"` + CreatedAt time.Time `json:"created_at" db:"created_at"` + + // Joined fields + EntityID string `json:"entity_id,omitempty" db:"entity_id"` + SharedAttrs json.RawMessage `json:"shared_attrs,omitempty" db:"shared_attrs"` +} + +// StateChangeRequest represents a request to record a state change +type StateChangeRequest struct { + EntityID string `json:"entity_id"` + SpaceSlug string `json:"space_slug,omitempty"` 
// optional, will be resolved from headers if not provided + NewState string `json:"new_state"` + OldState string `json:"old_state,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + EventType string `json:"event_type,omitempty"` // defaults to "state_changed" + TimeFiredTs *int64 `json:"time_fired_ts,omitempty"` + ContextID []byte `json:"context_id,omitempty"` + TriggerID *string `json:"trigger_id,omitempty"` // for future automations reference + AllowNewEvent *bool `json:"allow_new_event,omitempty"` // flag to control duplicate event creation +} + +// StateHistoryResponse represents historical state data for an entity +type StateHistoryResponse struct { + EntityID string `json:"entity_id"` + State string `json:"state"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + LastChanged time.Time `json:"last_changed"` + LastUpdated time.Time `json:"last_updated"` + EventID *int64 `json:"event_id,omitempty"` +} + +// EventsListResponse represents paginated events +type EventsListResponse struct { + Events []Event `json:"events"` + TotalCount int `json:"total_count"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +// StateDetailResponse combines state with metadata and attributes +type StateDetailResponse struct { + StateID int64 `json:"state_id"` + EntityID string `json:"entity_id"` + State string `json:"state"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + LastChanged time.Time `json:"last_changed"` + LastUpdated time.Time `json:"last_updated"` + TriggeringEvent *EventDetail `json:"triggering_event,omitempty"` +} + +// EventDetail represents a full event with its type and data +type EventDetail struct { + EventID int64 `json:"event_id"` + EventType string `json:"event_type"` + LevelEventID *string `json:"level_event_id,omitempty"` + EventRuleID *string `json:"event_rule_id,omitempty"` + SpaceSlug string `json:"space_slug,omitempty"` + EntityID *string `json:"entity_id,omitempty"` + 
StateID *int64 `json:"state_id,omitempty"` + TriggerID *string `json:"trigger_id,omitempty"` + AllowNewEvent *bool `json:"allow_new_event,omitempty"` + TimeFired time.Time `json:"time_fired"` + EventData map[string]interface{} `json:"event_data,omitempty"` + ContextID []byte `json:"context_id,omitempty"` + + // Joined fields + EventRule *EventRule `json:"event_rule,omitempty"` +} + +// LastChangedTime converts the last_changed_ts to time.Time +func (s *State) LastChangedTime() time.Time { + return time.UnixMilli(s.LastChangedTs) +} + +// LastUpdatedTime converts the last_updated_ts to time.Time +func (s *State) LastUpdatedTime() time.Time { + return time.UnixMilli(s.LastUpdatedTs) +} + +// TimeFired converts the time_fired_ts to time.Time +func (e *Event) TimeFired() time.Time { + return time.UnixMilli(e.TimeFiredTs) +} + +// ParseAttributes parses the shared_attrs JSON into a map +func (s *State) ParseAttributes() (map[string]interface{}, error) { + if s.SharedAttrs == nil { + return nil, nil + } + var attrs map[string]interface{} + if err := json.Unmarshal(s.SharedAttrs, &attrs); err != nil { + return nil, err + } + return attrs, nil +} + +// ParseEventData parses the shared_data JSON into a map +func (e *Event) ParseEventData() (map[string]interface{}, error) { + if e.SharedData == nil { + return nil, nil + } + var data map[string]interface{} + if err := json.Unmarshal(e.SharedData, &data); err != nil { + return nil, err + } + return data, nil +} + +// SetSharedAttrs sets the shared_attrs from a map +func (s *StateAttributes) SetSharedAttrs(attrs map[string]interface{}) error { + if attrs == nil { + return nil + } + data, err := json.Marshal(attrs) + if err != nil { + return err + } + s.SharedAttrs = data + return nil +} + +// SetSharedData sets the shared_data from a map +func (e *EventData) SetSharedData(data map[string]interface{}) error { + if data == nil { + return nil + } + raw, err := json.Marshal(data) + if err != nil { + return err + } + e.SharedData = 
raw + return nil +} + +// EventRuleRequest represents a request to create or update an event rule +type EventRuleRequest struct { + EntityID *string `json:"entity_id,omitempty" validate:"omitempty,uuid"` + DeviceModelID *string `json:"device_model_id,omitempty" validate:"omitempty,uuid"` + RuleKey *string `json:"rule_key,omitempty"` + Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte contains"` + Operand string `json:"operand" validate:"required"` + Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` + IsActive *bool `json:"is_active,omitempty"` + StartTime *string `json:"start_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` + EndTime *string `json:"end_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` +} + +// EventRuleResponse represents an event rule response +type EventRuleResponse struct { + EventRuleID string `json:"event_rule_id"` + EntityID *string `json:"entity_id,omitempty"` + DeviceModelID *string `json:"device_model_id,omitempty"` + RuleKey *string `json:"rule_key,omitempty"` + Operator *string `json:"operator,omitempty"` + Operand string `json:"operand"` + Status *string `json:"status,omitempty"` + IsActive *bool `json:"is_active,omitempty"` + StartTime *time.Time `json:"start_time,omitempty"` + EndTime *time.Time `json:"end_time,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// EventRulesListResponse represents paginated event rules +type EventRulesListResponse struct { + Rules []EventRule `json:"rules"` + TotalCount int `json:"total_count"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} diff --git a/internal/timescaledb/events.go b/internal/timescaledb/events.go new file mode 100644 index 0000000..67eabcb --- /dev/null +++ b/internal/timescaledb/events.go @@ -0,0 +1,486 @@ +package timescaledb + +import ( + "context" + "database/sql" + "fmt" + "log" + 
"time" + + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/stephenafamo/bob" +) + +// EventType constants +const ( + EventTypeStateChanged = "state_changed" + EventTypeServiceCall = "service_call" + EventTypeAutomation = "automation_triggered" + EventTypeDeviceTriggered = "device_triggered" +) + +// Pagination constants +const ( + DefaultPage = 1 + DefaultPageSize = 20 + MaxPageSize = 100 + DefaultEventLimit = 100 +) + +// GetEventsByDevice retrieves all events for a specific entity. +func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, limit int) ([]models.Event, error) { + if org == "" { + return nil, fmt.Errorf("organization is required") + } + if deviceID == "" { + return nil, fmt.Errorf("device_id is required") + } + if limit <= 0 { + limit = DefaultEventLimit + } + + var events []models.Event + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + // Query events where the event_data contains this device_id + query := ` + SELECT e.event_id, e.event_type_id, e.data_id, e.space_slug, e.context_id_bin, + e.trigger_id, e.allow_new_event, e.time_fired_ts, et.event_type, ed.shared_data + FROM events e + JOIN event_types et ON e.event_type_id = et.event_type_id + LEFT JOIN event_data ed ON e.data_id = ed.data_id + WHERE ed.shared_data->>'device_id' = $1 + ORDER BY e.time_fired_ts DESC + LIMIT $2 + ` + + rows, err := tx.QueryContext(txCtx, query, deviceID, limit) + if err != nil { + return fmt.Errorf("failed to query events by device: %w", err) + } + defer func(){ + _ = rows.Close() + }() + + for rows.Next() { + var e models.Event + var dataID sql.NullInt64 + var slug sql.NullString + var contextID []byte + var triggerID sql.NullString + var allowNewEvent sql.NullBool + var sharedData []byte + + if err := rows.Scan(&e.EventID, &e.EventTypeID, &dataID, &slug, &contextID, &triggerID, &allowNewEvent, &e.TimeFiredTs, &e.EventType, &sharedData); err != nil { + return err + } + + if dataID.Valid { + 
e.DataID = &dataID.Int64 + } + if slug.Valid { + e.SpaceSlug = slug.String + } + if len(contextID) > 0 { + e.ContextID = contextID + } + if triggerID.Valid { + e.TriggerID = &triggerID.String + } + if allowNewEvent.Valid { + e.AllowNewEvent = &allowNewEvent.Bool + } + if len(sharedData) > 0 { + e.SharedData = sharedData + } + + events = append(events, e) + } + + return rows.Err() + }) + + if err != nil { + return nil, err + } + + return events, nil +} + +// ============================================================================ +// Event Rules +// ============================================================================ + +// populateEventRuleResponse populates an EventRuleResponse from request data and times +func populateEventRuleResponse(result *models.EventRuleResponse, req *models.EventRuleRequest, startTime, endTime *time.Time) { + if req.EntityID != nil { + result.EntityID = req.EntityID + } + if req.DeviceModelID != nil { + result.DeviceModelID = req.DeviceModelID + } + if req.RuleKey != nil { + result.RuleKey = req.RuleKey + } + if req.Operator != nil { + result.Operator = req.Operator + } + result.Operand = req.Operand + if req.Status != nil { + result.Status = req.Status + } + if req.IsActive != nil { + result.IsActive = req.IsActive + } + result.StartTime = startTime + result.EndTime = endTime +} + +// GetEventRules retrieves event rules with pagination +func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageSize int) ([]models.EventRule, int, error) { + if page <= 0 { + page = DefaultPage + } + if pageSize <= 0 || pageSize > MaxPageSize { + pageSize = DefaultPageSize + } + + offset := (page - 1) * pageSize + + var rules []models.EventRule + var total int + + err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + // Count total + countQuery := `SELECT COUNT(*) FROM event_rules` + args := []interface{}{} + + whereClause := "" + if entityID != "" { + whereClause = " WHERE entity_id = $1" + args = 
append(args, entityID) + } + + countQuery += whereClause + err := tx.QueryRowContext(txCtx, countQuery, args...).Scan(&total) + if err != nil { + return fmt.Errorf("failed to count event rules: %w", err) + } + + // Query rules + query := ` + SELECT er.event_rule_id, er.entity_id, er.device_model_id, + er.rule_key, er.operator, er.operand, er.status, er.is_active, + er.start_time, er.end_time, er.created_at, er.updated_at + FROM event_rules er + ` + whereClause + ` ORDER BY er.created_at DESC LIMIT $` + fmt.Sprintf("%d", len(args)+1) + ` OFFSET $` + fmt.Sprintf("%d", len(args)+2) + args = append(args, pageSize, offset) + + rows, err := tx.QueryContext(txCtx, query, args...) + if err != nil { + return fmt.Errorf("failed to query event rules: %w", err) + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var r models.EventRule + if err := rows.Scan( + &r.EventRuleID, &r.EntityID, &r.DeviceModelID, + &r.RuleKey, &r.Operator, &r.Operand, &r.Status, &r.IsActive, + &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, + ); err != nil { + return err + } + + rules = append(rules, r) + } + + return rows.Err() + }) + + if err != nil { + return nil, 0, err + } + + return rules, total, nil +} + +// CreateEventRule creates a new event rule +func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleRequest) (*models.EventRuleResponse, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + + var result models.EventRuleResponse + + err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + // Parse start and end times + var startTime, endTime *time.Time + if req.StartTime != nil { + t, parseErr := time.Parse(time.RFC3339, *req.StartTime) + if parseErr != nil { + return fmt.Errorf("invalid start_time format: %w", parseErr) + } + startTime = &t + } + if req.EndTime != nil { + t, parseErr := time.Parse(time.RFC3339, *req.EndTime) + if parseErr != nil { + return fmt.Errorf("invalid end_time format: %w", parseErr) + } + 
endTime = &t + } + + // Insert event rule + err := tx.QueryRowContext(txCtx, ` + INSERT INTO event_rules (entity_id, device_model_id, rule_key, operator, operand, status, is_active, start_time, end_time) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + RETURNING event_rule_id, created_at, updated_at + `, req.EntityID, req.DeviceModelID, req.RuleKey, req.Operator, req.Operand, + req.Status, req.IsActive, startTime, endTime).Scan( + &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, + ) + + if err != nil { + return fmt.Errorf("failed to insert event rule: %w", err) + } + populateEventRuleResponse(&result, req, startTime, endTime) + + return nil + }) + + if err != nil { + return nil, err + } + + return &result, nil +} + +// UpdateEventRule updates an existing event rule +func (c *Client) UpdateEventRule(ctx context.Context, ruleID string, req *models.EventRuleRequest) (*models.EventRuleResponse, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + if ruleID == "" { + return nil, fmt.Errorf("rule_id is required") + } + + var result models.EventRuleResponse + + err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + // Parse start and end times + var startTime, endTime *time.Time + if req.StartTime != nil { + t, parseErr := time.Parse(time.RFC3339, *req.StartTime) + if parseErr != nil { + return fmt.Errorf("invalid start_time format: %w", parseErr) + } + startTime = &t + } + if req.EndTime != nil { + t, parseErr := time.Parse(time.RFC3339, *req.EndTime) + if parseErr != nil { + return fmt.Errorf("invalid end_time format: %w", parseErr) + } + endTime = &t + } + + // Update event rule + err := tx.QueryRowContext(txCtx, ` + UPDATE event_rules + SET entity_id = $1, device_model_id = $2, rule_key = $3, + operator = $4, operand = $5, status = $6, is_active = $7, + start_time = $8, end_time = $9, updated_at = NOW() + WHERE event_rule_id = $10 + RETURNING event_rule_id, created_at, updated_at + `, req.EntityID, req.DeviceModelID, 
req.RuleKey, req.Operator, req.Operand, + req.Status, req.IsActive, startTime, endTime, ruleID).Scan( + &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, + ) + + if err != nil { + return fmt.Errorf("failed to update event rule: %w", err) + } + populateEventRuleResponse(&result, req, startTime, endTime) + + return nil + }) + + if err != nil { + return nil, err + } + + return &result, nil +} + +// DeleteEventRule deletes an event rule +func (c *Client) DeleteEventRule(ctx context.Context, ruleID string) error { + if ruleID == "" { + return fmt.Errorf("rule_id is required") + } + + return c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + result, err := tx.ExecContext(txCtx, `DELETE FROM event_rules WHERE event_rule_id = $1`, ruleID) + if err != nil { + return fmt.Errorf("failed to delete event rule: %w", err) + } + + rows, _ := result.RowsAffected() + if rows == 0 { + return fmt.Errorf("event rule not found") + } + + return nil + }) +} + +// ruleConfig represents a single event rule configuration for seeding +type ruleConfig struct { + RuleKey string + EntityIDPattern string + Operator string + Operand string + EventType string + EventLevel string + Description string + Status string + IsActive bool +} + +// deviceModelRules represents event rules for a specific device model +type deviceModelRules struct { + DeviceModel string + DeviceModelID string + Rules []ruleConfig +} + +// SeedDefaultEventRules seeds default event rules from configuration +// This is typically called on service startup to ensure default rules exist +func (c *Client) SeedDefaultEventRules(ctx context.Context, rulesConfig interface{}) error { + var config struct { + DeviceModels []deviceModelRules + } + + // Use type assertion to handle different config types + // This allows passing either the raw YAML unmarshalled struct or our custom config type + switch cfg := rulesConfig.(type) { + case map[string]interface{}: + // Handle raw YAML map + if dms, ok := 
cfg["device_models"].([]interface{}); ok { + for _, dm := range dms { + dmMap, ok := dm.(map[string]interface{}) + if !ok { + continue + } + dmr := deviceModelRules{ + DeviceModel: getString(dmMap, "device_model"), + DeviceModelID: getString(dmMap, "device_model_id"), + } + if rules, ok := dmMap["rules"].([]interface{}); ok { + for _, r := range rules { + rMap, ok := r.(map[string]interface{}) + if !ok { + continue + } + dmr.Rules = append(dmr.Rules, ruleConfig{ + RuleKey: getString(rMap, "rule_key"), + EntityIDPattern: getString(rMap, "entity_id_pattern"), + Operator: getString(rMap, "operator"), + Operand: getString(rMap, "operand"), + EventType: getString(rMap, "event_type"), + EventLevel: getString(rMap, "event_level"), + Description: getString(rMap, "description"), + Status: getString(rMap, "status"), + IsActive: getBool(rMap, "is_active"), + }) + } + } + config.DeviceModels = append(config.DeviceModels, dmr) + } + } + default: + return fmt.Errorf("unsupported config type: %T", rulesConfig) + } + + // Seed rules for each device model + for _, dm := range config.DeviceModels { + if err := c.seedDeviceModelRules(ctx, dm); err != nil { + return fmt.Errorf("failed to seed rules for device model %s: %w", dm.DeviceModel, err) + } + } + + return nil +} + +// seedDeviceModelRules seeds event rules for a specific device model +func (c *Client) seedDeviceModelRules(ctx context.Context, dm deviceModelRules) error { + return c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + for _, ruleCfg := range dm.Rules { + // Check if rule already exists (by device_model_id + rule_key + operator + operand) + var existingRuleID string + checkQuery := ` + SELECT event_rule_id FROM event_rules + WHERE device_model_id = $1 AND rule_key = $2 AND operator = $3 AND operand = $4 + LIMIT 1 + ` + err := tx.QueryRowContext(txCtx, checkQuery, dm.DeviceModelID, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand).Scan(&existingRuleID) + + if err == nil { + // Rule already 
exists, skip + log.Printf("[EventRules] Rule already exists for %s:%s (%s %s), skipping", dm.DeviceModel, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand) + continue + } + + if err != sql.ErrNoRows { + return fmt.Errorf("failed to check existing rule: %w", err) + } + + // Create new rule + var ruleID string + var entityID *string + if ruleCfg.EntityIDPattern != "" { + entityID = &ruleCfg.EntityIDPattern + } + ruleKey := &ruleCfg.RuleKey + operator := &ruleCfg.Operator + status := &ruleCfg.Status + isActive := &ruleCfg.IsActive + + insertQuery := ` + INSERT INTO event_rules (device_model_id, entity_id, rule_key, operator, operand, status, is_active) + VALUES ($1, $2, $3, $4, $5, $6, $7) + RETURNING event_rule_id + ` + err = tx.QueryRowContext(txCtx, insertQuery, + dm.DeviceModelID, entityID, ruleKey, operator, ruleCfg.Operand, status, isActive, + ).Scan(&ruleID) + + if err != nil { + return fmt.Errorf("failed to insert event rule: %w", err) + } + + log.Printf("[EventRules] Seeded rule: %s - %s %s %s", dm.DeviceModel, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand) + } + return nil + }) +} + +// getString safely extracts a string value from a map +func getString(m map[string]interface{}, key string) string { + if v, ok := m[key]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +// getBool safely extracts a bool value from a map +func getBool(m map[string]interface{}, key string) bool { + if v, ok := m[key]; ok { + if b, ok := v.(bool); ok { + return b + } + } + return false +} diff --git a/internal/timescaledb/telemetry.go b/internal/timescaledb/telemetry.go index 8c85a54..0c53066 100644 --- a/internal/timescaledb/telemetry.go +++ b/internal/timescaledb/telemetry.go @@ -207,3 +207,36 @@ func nullUUID(id sql.NullString) any { } return nil } + +// UpdateDeviceTriggerEventType updates the trigger event type for a device +func (c *Client) UpdateDeviceTriggerEventType(ctx context.Context, deviceID, triggerEventType string) error { + if
deviceID == "" { + return fmt.Errorf("device_id is required") + } + if triggerEventType == "" { + return fmt.Errorf("trigger_event_type is required") + } + + org := orgFromContext(ctx) + if org == "" { + return fmt.Errorf("organization not found in context") + } + + return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + _, err := tx.ExecContext(txCtx, ` + UPDATE entities + SET trigger_event_type = $1, + updated_at = NOW() + WHERE id = $2 + `, triggerEventType, deviceID) + + if err != nil { + return fmt.Errorf("failed to update device trigger event type: %w", err) + } + + log.Printf("[Telemetry] Updated device trigger event type: org=%s, device_id=%s, trigger_event_type=%s", + org, deviceID, triggerEventType) + + return nil + }) +} diff --git a/pkgs/db/migrations/20251225000000_create_events_schema.sql b/pkgs/db/migrations/20251225000000_create_events_schema.sql new file mode 100644 index 0000000..897ead9 --- /dev/null +++ b/pkgs/db/migrations/20251225000000_create_events_schema.sql @@ -0,0 +1,91 @@ +-- migrate:up +-- Events Schema +CREATE TABLE IF NOT EXISTS event_types ( + event_type_id SERIAL PRIMARY KEY, + event_type TEXT NOT NULL UNIQUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +-- Insert common event types +INSERT INTO event_types (event_type) VALUES + ('state_changed'), + ('automation_triggered'), + ('device_event'), + ('user_action') +ON CONFLICT (event_type) DO NOTHING; + +-- Event Data: Shared data deduplicated by hash +CREATE TABLE IF NOT EXISTS event_data ( + data_id SERIAL PRIMARY KEY, + hash BIGINT NOT NULL UNIQUE, + shared_data JSONB NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_event_data_hash ON event_data (hash); + +-- Event Rules: Rules for triggering events based on conditions +CREATE TABLE IF NOT EXISTS event_rules ( + event_rule_id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + entity_id UUID REFERENCES entities(id) ON DELETE SET NULL, + device_model_id UUID, + 
rule_key TEXT, -- e.g., 'battery_low', 'temperature_low' + operator VARCHAR(16) CHECK (operator IN ('eq', 'ne', 'gt', 'lt', 'gte', 'lte', 'contains')), + operand TEXT NOT NULL, + status VARCHAR(16) CHECK (status IN ('active', 'inactive', 'paused')) DEFAULT 'active', + is_active BOOLEAN DEFAULT true, + start_time TIMESTAMPTZ, + end_time TIMESTAMPTZ, + allow_new_event BOOLEAN, -- flag to control duplicate event creation + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_event_rules_entity_id ON event_rules (entity_id); +CREATE INDEX IF NOT EXISTS idx_event_rules_device_model_id ON event_rules (device_model_id); +CREATE INDEX IF NOT EXISTS idx_event_rules_status ON event_rules (status); +CREATE INDEX IF NOT EXISTS idx_event_rules_is_active ON event_rules (is_active); + +-- Events: Event occurrences linking to event_type and event_data +-- space_slug is stored here for filtering events within a space +-- entity_id links the event to a specific entity for faster queries +CREATE TABLE IF NOT EXISTS events ( + event_id BIGSERIAL PRIMARY KEY, + event_type_id INTEGER NOT NULL REFERENCES event_types(event_type_id) ON DELETE CASCADE, + data_id INTEGER REFERENCES event_data(data_id) ON DELETE SET NULL, + event_level TEXT CHECK (event_level IN ('manufacturer', 'system', 'user')), + event_rule_id UUID REFERENCES event_rules(event_rule_id) ON DELETE SET NULL, + space_slug TEXT, + entity_id UUID REFERENCES entities(id) ON DELETE SET NULL, + state_id BIGINT REFERENCES entity_states(state_id) ON DELETE SET NULL, + context_id_bin BYTEA, + trigger_id UUID, -- for future automations table reference + time_fired_ts BIGINT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX IF NOT EXISTS idx_events_event_type_id ON events (event_type_id); +CREATE INDEX IF NOT EXISTS idx_events_event_rule_id ON events (event_rule_id); +CREATE INDEX IF NOT EXISTS idx_events_space_slug ON events 
(space_slug); +CREATE INDEX IF NOT EXISTS idx_events_entity_id ON events (entity_id); +CREATE INDEX IF NOT EXISTS idx_events_state_id ON events (state_id); +CREATE INDEX IF NOT EXISTS idx_events_trigger_id ON events (trigger_id); +CREATE INDEX IF NOT EXISTS idx_events_time_fired_ts ON events (time_fired_ts DESC); +CREATE INDEX IF NOT EXISTS idx_events_data_id ON events (data_id); + +-- Add event_id column to entity_states to create bidirectional linkage +-- This allows states to reference their triggering event +ALTER TABLE entity_states ADD COLUMN IF NOT EXISTS event_id BIGINT REFERENCES events(event_id) ON DELETE SET NULL; + +CREATE INDEX IF NOT EXISTS idx_entity_states_event_id ON entity_states (event_id); + +-- migrate:down + +-- Remove event_id column from entity_states +ALTER TABLE entity_states DROP COLUMN IF EXISTS event_id; + +-- Drop events schema tables (entity_states and entity_state_attributes remain) +DROP TABLE IF EXISTS events CASCADE; +DROP TABLE IF EXISTS event_rules CASCADE; +DROP TABLE IF EXISTS event_data CASCADE; +DROP TABLE IF EXISTS event_types CASCADE; From 1856eb3650b240f9ce0db1b8232fd0c82eee9bed Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Mon, 26 Jan 2026 22:24:20 +0700 Subject: [PATCH 04/10] refactor: move the rules to its own particular component --- configs/event_rules/{rak4630.yaml => rak4630/rules.yaml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename configs/event_rules/{rak4630.yaml => rak4630/rules.yaml} (97%) diff --git a/configs/event_rules/rak4630.yaml b/configs/event_rules/rak4630/rules.yaml similarity index 97% rename from configs/event_rules/rak4630.yaml rename to configs/event_rules/rak4630/rules.yaml index f5ca86a..6ad3847 100644 --- a/configs/event_rules/rak4630.yaml +++ b/configs/event_rules/rak4630/rules.yaml @@ -1,7 +1,7 @@ # RAK4630 Event Rules device_model: "rak4630" device_model_id: "" # Will be resolved from device service -display_name: "RAK4630 WisBlock" +display_name: "RAK4630 Event Rules" 
rules: # Battery Low Warning From baa6259fe65b9b8fa8ea20f26a0e677fd7ef20ba Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Mon, 2 Feb 2026 17:39:10 +0700 Subject: [PATCH 05/10] refactor: update default event rules bootstrap and optimize evaluating rule keys --- cmd/telemetry/serve.go | 20 +- .../rules.yaml => rakwireless/rak4630.yaml} | 4 +- internal/api/router.go | 2 + internal/events/evaluator/evaluator.go | 238 ++++++++++++++ internal/events/loader/system_rules_loader.go | 119 +++++++ internal/events/registry/cache.go | 197 ++++++++++++ internal/events/registry/registry.go | 243 +++++++++++++++ internal/models/events.go | 92 +++--- internal/services/processor.go | 45 ++- internal/timescaledb/events.go | 293 ++++++++---------- .../20251225000000_create_events_schema.sql | 24 +- 11 files changed, 1053 insertions(+), 224 deletions(-) rename configs/event_rules/{rak4630/rules.yaml => rakwireless/rak4630.yaml} (95%) create mode 100644 internal/events/evaluator/evaluator.go create mode 100644 internal/events/loader/system_rules_loader.go create mode 100644 internal/events/registry/cache.go create mode 100644 internal/events/registry/registry.go diff --git a/cmd/telemetry/serve.go b/cmd/telemetry/serve.go index d321324..6c4532e 100644 --- a/cmd/telemetry/serve.go +++ b/cmd/telemetry/serve.go @@ -29,6 +29,7 @@ import ( alertregistry "github.com/Space-DF/telemetry-service/internal/alerts/registry" amqp "github.com/Space-DF/telemetry-service/internal/amqp/multi-tenant" "github.com/Space-DF/telemetry-service/internal/api" + "github.com/Space-DF/telemetry-service/internal/events/registry" "github.com/Space-DF/telemetry-service/internal/health" "github.com/Space-DF/telemetry-service/internal/services" "github.com/Space-DF/telemetry-service/internal/timescaledb" @@ -88,8 +89,18 @@ func cmdServe(ctx *cli.Context, logger *zap.Logger) error { } }() + // Initialize rule registry + ruleRegistry := registry.NewRuleRegistry(tsClient, logger) + + // Load default rules from YAML 
+ if appConfig.Server.EventRulesDir != "" { + if err := ruleRegistry.LoadDefaultRulesFromDir(appConfig.Server.EventRulesDir); err != nil { + logger.Warn("Failed to load default event rules", zap.Error(err)) + } + } + // Initialize location processor - processor := services.NewLocationProcessor(tsClient, logger) + processor := services.NewLocationProcessor(tsClient, ruleRegistry, logger) // Initialize multi-tenant AMQP consumer with schema initializer consumer := amqp.NewMultiTenantConsumer(appConfig.AMQP, appConfig.OrgEvents, processor, tsClient, logger) @@ -135,12 +146,17 @@ func cmdServe(ctx *cli.Context, logger *zap.Logger) error { } }() - // Setup reload signal for alert processors + // Setup reload signal for alert processors and event rules reloadChan := make(chan os.Signal, 1) signal.Notify(reloadChan, syscall.SIGHUP) go func() { for range reloadChan { loadAlertProcessors(logger, appConfig.Server.AlertsProcessorsCfg) + if appConfig.Server.EventRulesDir != "" { + if err := ruleRegistry.ReloadDefaultRules(appConfig.Server.EventRulesDir); err != nil { + logger.Warn("Failed to reload default event rules", zap.Error(err)) + } + } } }() diff --git a/configs/event_rules/rak4630/rules.yaml b/configs/event_rules/rakwireless/rak4630.yaml similarity index 95% rename from configs/event_rules/rak4630/rules.yaml rename to configs/event_rules/rakwireless/rak4630.yaml index 6ad3847..e040cff 100644 --- a/configs/event_rules/rak4630/rules.yaml +++ b/configs/event_rules/rakwireless/rak4630.yaml @@ -1,6 +1,6 @@ # RAK4630 Event Rules -device_model: "rak4630" -device_model_id: "" # Will be resolved from device service +model: "rak4630" +model_id: "" # Will be resolved from device service display_name: "RAK4630 Event Rules" rules: diff --git a/internal/api/router.go b/internal/api/router.go index e4dc72b..f11062c 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -4,6 +4,7 @@ import ( "github.com/Space-DF/telemetry-service/internal/api/alerts" 
"github.com/Space-DF/telemetry-service/internal/api/data" "github.com/Space-DF/telemetry-service/internal/api/entities" + "github.com/Space-DF/telemetry-service/internal/api/events" "github.com/Space-DF/telemetry-service/internal/api/location" "github.com/Space-DF/telemetry-service/internal/api/widget" "github.com/Space-DF/telemetry-service/internal/config" @@ -19,4 +20,5 @@ func Setup(cfg *config.Config, e *echo.Group, logger *zap.Logger, tsClient *time alerts.RegisterRoutes(group, logger, tsClient) widget.RegisterRoutes(group, logger, tsClient) data.RegisterRoutes(group, logger, tsClient) + events.RegisterRoutes(group, cfg, logger, tsClient) } diff --git a/internal/events/evaluator/evaluator.go b/internal/events/evaluator/evaluator.go new file mode 100644 index 0000000..7aee318 --- /dev/null +++ b/internal/events/evaluator/evaluator.go @@ -0,0 +1,238 @@ +package evaluator + +import ( + "fmt" + "strconv" + "strings" + + "github.com/Space-DF/telemetry-service/internal/events/loader" + "github.com/Space-DF/telemetry-service/internal/models" + "go.uber.org/zap" +) + +// Evaluator handles rule evaluation logic +type Evaluator struct { + logger *zap.Logger +} + +// NewEvaluator creates a new rule evaluator +func NewEvaluator(logger *zap.Logger) *Evaluator { + return &Evaluator{ + logger: logger, + } +} + +// EvaluateRule evaluates a single YAML rule against an entity +func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEntity) *models.MatchedEvent { + // Skip inactive rules + if !rule.IsActive || rule.Status != "active" { + return nil + } + + // Check if rule applies to this entity (if entity_id_pattern is specified) + if rule.EntityIDPattern != "" { + if !contains(entity.EntityID, rule.EntityIDPattern) && entity.EntityID != rule.EntityIDPattern { + return nil + } + } + + // Get the value from entity attributes based on rule_key + value, exists := e.getEntityValue(entity, rule.RuleKey) + if !exists { + return nil + } + + // Parse the operand 
as float64 + operand, err := strconv.ParseFloat(rule.Operand, 64) + if err != nil { + e.logger.Warn("Failed to parse operand as float64", + zap.String("rule_key", rule.RuleKey), + zap.String("operand", rule.Operand), + zap.Error(err)) + return nil + } + + // Evaluate the condition + matched := e.compareValues(value, operand, rule.Operator) + if !matched { + return nil + } + + return &models.MatchedEvent{ + EntityID: entity.EntityID, + EntityType: entity.EntityType, + RuleKey: rule.RuleKey, + EventType: rule.EventType, + EventLevel: rule.EventLevel, + Description: rule.Description, + Value: value, + Threshold: operand, + Operator: rule.Operator, + RuleSource: "default", + } +} + +// EvaluateRuleDB evaluates a database rule against an entity +func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.TelemetryEntity) *models.MatchedEvent { + // Skip inactive rules + if rule.IsActive != nil && !*rule.IsActive { + return nil + } + if rule.Status != nil && *rule.Status != "active" { + return nil + } + + // Get rule key from database rule + ruleKey := "" + if rule.RuleKey != nil { + ruleKey = *rule.RuleKey + } + if ruleKey == "" { + return nil + } + + // Get the value from entity attributes based on rule_key + value, exists := e.getEntityValue(entity, ruleKey) + if !exists { + return nil + } + + // Parse the operand as float64 + operand, err := strconv.ParseFloat(rule.Operand, 64) + if err != nil { + e.logger.Warn("Failed to parse operand as float64", + zap.String("rule_key", ruleKey), + zap.String("operand", rule.Operand), + zap.Error(err)) + return nil + } + + // Get operator + operator := "" + if rule.Operator != nil { + operator = *rule.Operator + } + if operator == "" { + operator = "eq" // default + } + + // Evaluate the condition + matched := e.compareValues(value, operand, operator) + if !matched { + return nil + } + + // Build description + description := fmt.Sprintf("Rule %s matched: %.2f %s %.2f", ruleKey, value, operator, operand) + + return 
&models.MatchedEvent{ + EntityID: entity.EntityID, + EntityType: entity.EntityType, + RuleKey: ruleKey, + EventType: "device_event", + EventLevel: "automation", + Description: description, + Value: value, + Threshold: operand, + Operator: operator, + RuleSource: "automation", + } +} + +// getEntityValue extracts a numeric value from an entity based on the rule key +func (e *Evaluator) getEntityValue(entity models.TelemetryEntity, ruleKey string) (float64, bool) { + // Try to get value from Attributes map first + if entity.Attributes != nil { + if val, ok := entity.Attributes[ruleKey]; ok { + return e.parseFloat64(val) + } + } + + // Try to get value from State (which could be a map or direct value) + if entity.State != nil { + switch s := entity.State.(type) { + case map[string]interface{}: + if val, ok := s[ruleKey]; ok { + return e.parseFloat64(val) + } + default: + // Check if rule_key matches or is a prefix of entity_type + // e.g., rule_key="battery_v" matches entity_type="battery" + // rule_key="temperature" matches entity_type="temperature" + if e.isRuleKeyRelevant(ruleKey, entity.EntityType, entity.Name) { + if val, ok := e.parseFloat64(s); ok { + return val, true + } + } + } + } + + return 0, false +} + +// isRuleKeyRelevant checks if a rule key is relevant to an entity +func (e *Evaluator) isRuleKeyRelevant(ruleKey, entityType, entityName string) bool { + if ruleKey == entityType { + return true + } + + if strings.HasPrefix(ruleKey, entityType) { + return true + } + + if strings.HasPrefix(entityType, ruleKey) { + return true + } + + return false +} + +// parseFloat64 converts various types to float64 +func (e *Evaluator) parseFloat64(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case float32: + return float64(v), true + case int: + return float64(v), true + case int64: + return float64(v), true + case int32: + return float64(v), true + case string: + f, err := strconv.ParseFloat(v, 64) + if err != nil 
{ + return 0, false + } + return f, true + default: + return 0, false + } +} + +// compareValues performs comparison based on operator +func (e *Evaluator) compareValues(value, threshold float64, operator string) bool { + switch operator { + case "gt": + return value > threshold + case "lt": + return value < threshold + case "gte": + return value >= threshold + case "lte": + return value <= threshold + case "eq": + return value == threshold + case "ne": + return value != threshold + default: + e.logger.Warn("Unknown operator", zap.String("operator", operator)) + return false + } +} + +// contains checks if the target string contains the substring (case-insensitive) +func contains(str, substr string) bool { + return strings.Contains(strings.ToLower(str), strings.ToLower(substr)) +} diff --git a/internal/events/loader/system_rules_loader.go b/internal/events/loader/system_rules_loader.go new file mode 100644 index 0000000..7f6be2a --- /dev/null +++ b/internal/events/loader/system_rules_loader.go @@ -0,0 +1,119 @@ +package loader + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v3" +) + +// YAMLRule represents a single event rule from YAML configuration +type YAMLRule struct { + RuleKey string `yaml:"rule_key"` + EntityIDPattern string `yaml:"entity_id_pattern"` + Operator string `yaml:"operator"` + Operand string `yaml:"operand"` + EventType string `yaml:"event_type"` + EventLevel string `yaml:"event_level"` + Description string `yaml:"description"` + Status string `yaml:"status"` + IsActive bool `yaml:"is_active"` +} + +// DeviceModelRules represents event rules for a specific device model +type DeviceModelRules struct { + Brand string `yaml:"brand"` // e.g., "rakwireless" + Model string `yaml:"model"` // e.g., "rak4630" + ModelID string `yaml:"model_id"` // Resolved from device service + DisplayName string `yaml:"display_name"` + Rules []YAMLRule `yaml:"rules"` +} + +// LoadSystemDefaultRules loads YAML files from brand/model directory 
structure +// These are system default rules that apply to all devices of a specific brand/model +func LoadSystemDefaultRules(dir string) (map[string]*DeviceModelRules, error) { + result := make(map[string]*DeviceModelRules) + + // Walk through the directory structure + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip directories and non-YAML files + if info.IsDir() { + return nil + } + + ext := strings.ToLower(filepath.Ext(path)) + if ext != ".yaml" && ext != ".yml" { + return nil + } + + // Load the device model rules from this file + rules, err := loadDeviceModelRules(path, dir) + if err != nil { + // Log warning but continue loading other files + fmt.Printf("Warning: failed to load %s: %v\n", path, err) + return nil + } + + // Index by brand/model + key := fmt.Sprintf("%s/%s", strings.ToLower(rules.Brand), strings.ToLower(rules.Model)) + result[key] = rules + + return nil + }) + + if err != nil { + return nil, fmt.Errorf("failed to walk directory: %w", err) + } + + if len(result) == 0 { + return nil, fmt.Errorf("no valid event rules found in directory: %s", dir) + } + + return result, nil +} + +// loadDeviceModelRules loads rules from a single YAML file +func loadDeviceModelRules(filePath, baseDir string) (*DeviceModelRules, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + var rules DeviceModelRules + if err := yaml.Unmarshal(data, &rules); err != nil { + return nil, fmt.Errorf("failed to parse YAML: %w", err) + } + + // Validate required fields + if rules.Brand == "" { + return nil, fmt.Errorf("missing required field: brand") + } + if rules.Model == "" { + return nil, fmt.Errorf("missing required field: model") + } + + // Set defaults + if rules.DisplayName == "" { + rules.DisplayName = fmt.Sprintf("%s %s Rules", strings.Title(rules.Brand), strings.ToUpper(rules.Model)) + } + + for i := range rules.Rules 
{ + if rules.Rules[i].Status == "" { + rules.Rules[i].Status = "active" + } + } + + return &rules, nil +} + +// GetRulesForDevice retrieves rules for a specific brand/model combination +func GetRulesForDevice(loadedRules map[string]*DeviceModelRules, brand, model string) *DeviceModelRules { + key := fmt.Sprintf("%s/%s", strings.ToLower(brand), strings.ToLower(model)) + return loadedRules[key] +} diff --git a/internal/events/registry/cache.go b/internal/events/registry/cache.go new file mode 100644 index 0000000..16dcc70 --- /dev/null +++ b/internal/events/registry/cache.go @@ -0,0 +1,197 @@ +package registry + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "go.uber.org/zap" +) + +const ( + // DefaultCacheTTL is the default time-to-live for cached device rules + DefaultCacheTTL = 5 * time.Minute + // DefaultCacheCleanupInterval is how often to clean expired cache entries + DefaultCacheCleanupInterval = 1 * time.Minute +) + +// DeviceRulesCacheEntry represents a cached entry for device automation rules +type DeviceRulesCacheEntry struct { + Rules []models.EventRule + CachedAt time.Time + ExpiresAt time.Time +} + +// DeviceRulesCache manages caching of device automation rules +type DeviceRulesCache struct { + cache map[string]*DeviceRulesCacheEntry // key: deviceID + mu sync.RWMutex + ttl time.Duration + db *timescaledb.Client + logger *zap.Logger + + // Metrics (atomic counters for thread safety) + hits atomic.Int64 + misses atomic.Int64 + + // Cleanup management + ticker *time.Ticker + stopped chan struct{} +} + +// NewDeviceRulesCache creates a new device rules cache +func NewDeviceRulesCache(db *timescaledb.Client, logger *zap.Logger) *DeviceRulesCache { + return &DeviceRulesCache{ + cache: make(map[string]*DeviceRulesCacheEntry), + ttl: DefaultCacheTTL, + ticker: time.NewTicker(DefaultCacheCleanupInterval), + stopped: make(chan 
struct{}), + db: db, + logger: logger, + } +} + +// Start begins the background cleanup goroutine +func (c *DeviceRulesCache) Start() { + go func() { + for { + select { + case <-c.ticker.C: + c.cleanupExpired() + case <-c.stopped: + c.ticker.Stop() + return + } + } + }() +} + +// Stop stops the cache cleanup goroutine +func (c *DeviceRulesCache) Stop() { + close(c.stopped) +} + +// Get retrieves device automation rules from cache or database +func (c *DeviceRulesCache) Get(ctx context.Context, deviceID string) []models.EventRule { + now := time.Now() + + // Try cache first (read lock) + c.mu.RLock() + entry, found := c.cache[deviceID] + c.mu.RUnlock() + + if found && now.Before(entry.ExpiresAt) { + // Cache hit - valid entry + c.hits.Add(1) + c.logger.Debug("Device rules cache hit", + zap.String("device_id", deviceID), + zap.Int("rule_count", len(entry.Rules))) + return entry.Rules + } + + // Cache miss or expired + c.misses.Add(1) + + if found { + c.logger.Debug("Device rules cache expired", + zap.String("device_id", deviceID), + zap.Time("expired_at", entry.ExpiresAt)) + } + + // Fetch from database + rules, err := c.db.GetActiveRulesForDevice(ctx, deviceID) + if err != nil { + c.logger.Error("Failed to fetch automation rules for device", + zap.String("device_id", deviceID), + zap.Error(err)) + return nil + } + + // Store in cache (write lock) + entry = &DeviceRulesCacheEntry{ + Rules: rules, + CachedAt: now, + ExpiresAt: now.Add(c.ttl), + } + + c.mu.Lock() + c.cache[deviceID] = entry + c.mu.Unlock() + + c.logger.Debug("Cached device automation rules", + zap.String("device_id", deviceID), + zap.Int("rule_count", len(rules)), + zap.Duration("ttl", c.ttl)) + + return rules +} + +// Invalidate removes cached rules for a specific device +// Call this when automation rules are created, updated, or deleted +func (c *DeviceRulesCache) Invalidate(deviceID string) { + c.mu.Lock() + delete(c.cache, deviceID) + c.mu.Unlock() + + c.logger.Info("Invalidated device automation 
rules cache", + zap.String("device_id", deviceID)) +} + +// InvalidateAll clears the entire cache +func (c *DeviceRulesCache) InvalidateAll() { + c.mu.Lock() + c.cache = make(map[string]*DeviceRulesCacheEntry) + c.mu.Unlock() + + c.logger.Info("Invalidated all device automation rules cache") +} + +// GetMetrics returns cache performance metrics +func (c *DeviceRulesCache) GetMetrics() map[string]interface{} { + hits := c.hits.Load() + misses := c.misses.Load() + total := hits + misses + + hitRate := float64(0) + if total > 0 { + hitRate = float64(hits) / float64(total) * 100 + } + + c.mu.RLock() + cacheSize := len(c.cache) + c.mu.RUnlock() + + return map[string]interface{}{ + "hits": hits, + "misses": misses, + "total": total, + "hit_rate": hitRate, + "cache_size": cacheSize, + "ttl_seconds": c.ttl.Seconds(), + } +} + +// cleanupExpired removes expired entries from the cache +func (c *DeviceRulesCache) cleanupExpired() { + now := time.Now() + expiredCount := 0 + + c.mu.Lock() + defer c.mu.Unlock() + + for deviceID, entry := range c.cache { + if now.After(entry.ExpiresAt) { + delete(c.cache, deviceID) + expiredCount++ + } + } + + if expiredCount > 0 { + c.logger.Debug("Cleaned expired device rules cache entries", + zap.Int("expired_count", expiredCount), + zap.Int("remaining_cache_size", len(c.cache))) + } +} diff --git a/internal/events/registry/registry.go b/internal/events/registry/registry.go new file mode 100644 index 0000000..364739d --- /dev/null +++ b/internal/events/registry/registry.go @@ -0,0 +1,243 @@ +package registry + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/Space-DF/telemetry-service/internal/events/evaluator" + "github.com/Space-DF/telemetry-service/internal/events/loader" + "github.com/Space-DF/telemetry-service/internal/models" + "github.com/Space-DF/telemetry-service/internal/timescaledb" + "go.uber.org/zap" +) + +// RuleRegistry manages event rules from both YAML files and database +type RuleRegistry struct { + // 
Default rules from YAML (key: "brand/model" e.g., "rakwireless/rak4630") + defaultRules map[string]*loader.DeviceModelRules + defaultRulesMu sync.RWMutex + + // Cache for device automation rules + cache *DeviceRulesCache + + evaluator *evaluator.Evaluator + db *timescaledb.Client + logger *zap.Logger +} + +// NewRuleRegistry creates a new rule registry +func NewRuleRegistry(db *timescaledb.Client, logger *zap.Logger) *RuleRegistry { + r := &RuleRegistry{ + defaultRules: make(map[string]*loader.DeviceModelRules), + cache: NewDeviceRulesCache(db, logger), + evaluator: evaluator.NewEvaluator(logger), + db: db, + logger: logger, + } + + // Start background cache cleanup + r.cache.Start() + + return r +} + +// LoadDefaultRulesFromDir loads system default event rules from YAML files organized by brand/model +func (r *RuleRegistry) LoadDefaultRulesFromDir(dir string) error { + r.defaultRulesMu.Lock() + defer r.defaultRulesMu.Unlock() + + rules, err := loader.LoadSystemDefaultRules(dir) + if err != nil { + return fmt.Errorf("failed to load default rules from directory: %w", err) + } + + r.defaultRules = rules + + // Log loaded rules + for _, dm := range rules { + r.logger.Info("Loaded default event rules", + zap.String("brand", dm.Brand), + zap.String("model", dm.Model), + zap.Int("rule_count", len(dm.Rules))) + } + + r.logger.Info("Default event rules loaded successfully", + zap.Int("device_models", len(rules))) + + return nil +} + +// This function is the core of the rule evaluation process. It checks whether there are any custom automation event rules that +// were created by the user for the specific device. If there are, it evaluates those first.
If not, it falls back to the default system rules +func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model string, entities []models.TelemetryEntity) []models.MatchedEvent { + var matchedEvents []models.MatchedEvent + matchedRuleKeys := make(map[string]bool) + + if len(entities) == 0 { + return matchedEvents + } + + // Try to get automation rules from cache first + customRules := r.cache.Get(ctx, deviceID) + + // Evaluate custom automation rules if they exist + if len(customRules) > 0 { + r.logger.Debug("Using custom automation rules for device", + zap.String("device_id", deviceID), + zap.Int("rule_count", len(customRules))) + + // Group rules by rule_key for O(1) lookup + rulesByKey := r.groupRulesByKey(customRules) + + // Match entity attributes to rules by rule_key + for _, entity := range entities { + // Track which rule_keys we've processed for this entity + processedKeys := make(map[string]bool) + + // Check each attribute in the entity + if entity.Attributes != nil { + for attrKey := range entity.Attributes { + // Skip if we already processed a rule with this key + if processedKeys[attrKey] { + continue + } + processedKeys[attrKey] = true + + // Find rules that match this attribute key + if rules, exists := rulesByKey[attrKey]; exists { + for _, rule := range rules { + if matched := r.evaluator.EvaluateRuleDB(rule, entity); matched != nil { + matchedEvents = append(matchedEvents, *matched) + matchedRuleKeys[matched.RuleKey] = true + } + } + } + } + } + + // Also check entity_type for state-based entities + if entity.EntityType != "" { + if rules, exists := rulesByKey[entity.EntityType]; exists { + if !processedKeys[entity.EntityType] { + processedKeys[entity.EntityType] = true + for _, rule := range rules { + if matched := r.evaluator.EvaluateRuleDB(rule, entity); matched != nil { + matchedEvents = append(matchedEvents, *matched) + matchedRuleKeys[matched.RuleKey] = true + } + } + } + } + } + } + } + + // Evaluate default system rules + 
// Only for rule_keys that didn't match custom automation rules + r.defaultRulesMu.RLock() + key := fmt.Sprintf("%s/%s", strings.ToLower(brand), strings.ToLower(model)) + defaultRules, exists := r.defaultRules[key] + r.defaultRulesMu.RUnlock() + + if exists { + // Group default rules by rule_key + defaultRulesByKey := make(map[string][]loader.YAMLRule) + for _, rule := range defaultRules.Rules { + defaultRulesByKey[rule.RuleKey] = append(defaultRulesByKey[rule.RuleKey], rule) + } + + for _, entity := range entities { + // Track processed keys to avoid duplicate evaluations + processedKeys := make(map[string]bool) + + // Check entity attributes + if entity.Attributes != nil { + for attrKey := range entity.Attributes { + // Skip if custom automation rule already matched for this rule_key + if matchedRuleKeys[attrKey] { + continue + } + processedKeys[attrKey] = true + + // Find default rules for this attribute + if rules, exists := defaultRulesByKey[attrKey]; exists { + for _, rule := range rules { + if matched := r.evaluator.EvaluateRule(rule, entity); matched != nil { + matchedEvents = append(matchedEvents, *matched) + } + } + } + } + } + + // Check entity_type for state-based entities + if entity.EntityType != "" { + // Skip if custom automation rule already matched for this rule_key + if !matchedRuleKeys[entity.EntityType] { + if rules, exists := defaultRulesByKey[entity.EntityType]; exists { + for _, rule := range rules { + if matched := r.evaluator.EvaluateRule(rule, entity); matched != nil { + matchedEvents = append(matchedEvents, *matched) + } + } + } + } + } + } + } + + return matchedEvents +} + +// groupRulesByKey groups database rules by rule_key for efficient O(1) lookup +func (r *RuleRegistry) groupRulesByKey(rules []models.EventRule) map[string][]models.EventRule { + rulesByKey := make(map[string][]models.EventRule) + for _, rule := range rules { + if rule.RuleKey != nil && *rule.RuleKey != "" { + rulesByKey[*rule.RuleKey] = 
append(rulesByKey[*rule.RuleKey], rule) + } + } + return rulesByKey +} + +// GetDefaultRules returns all loaded default rules (for debugging/inspection) +func (r *RuleRegistry) GetDefaultRules() map[string]*loader.DeviceModelRules { + r.defaultRulesMu.RLock() + defer r.defaultRulesMu.RUnlock() + + // Return a copy to avoid race conditions + result := make(map[string]*loader.DeviceModelRules, len(r.defaultRules)) + for k, v := range r.defaultRules { + result[k] = v + } + return result +} + +// ReloadDefaultRules reloads default rules from the configured directory +func (r *RuleRegistry) ReloadDefaultRules(dir string) error { + r.logger.Info("Reloading default event rules", zap.String("dir", dir)) + return r.LoadDefaultRulesFromDir(dir) +} + +// InvalidateDeviceCache removes cached rules for a specific device +// Call this when automation rules are created, updated, or deleted for a device +func (r *RuleRegistry) InvalidateDeviceCache(deviceID string) { + r.cache.Invalidate(deviceID) +} + +// InvalidateAllDeviceCache clears the entire device rules cache +func (r *RuleRegistry) InvalidateAllDeviceCache() { + r.cache.InvalidateAll() +} + +// GetCacheMetrics returns cache performance metrics +func (r *RuleRegistry) GetCacheMetrics() map[string]interface{} { + return r.cache.GetMetrics() +} + +// Stop stops the registry and cleanup goroutines +func (r *RuleRegistry) Stop() { + r.cache.Stop() +} diff --git a/internal/models/events.go b/internal/models/events.go index fc0a19c..f5b5cd1 100644 --- a/internal/models/events.go +++ b/internal/models/events.go @@ -20,11 +20,10 @@ type EventData struct { CreatedAt time.Time `json:"created_at" db:"created_at"` } -// EventRule represents a rule for triggering events based on conditions +// EventRule represents an automation rule for triggering events based on conditions type EventRule struct { EventRuleID string `json:"event_rule_id" db:"event_rule_id"` - EntityID *string `json:"entity_id,omitempty" db:"entity_id"` - DeviceModelID 
*string `json:"device_model_id,omitempty" db:"device_model_id"` + DeviceID *string `json:"device_id,omitempty" db:"device_id"` RuleKey *string `json:"rule_key,omitempty" db:"rule_key"` // e.g., 'battery_low', 'temperature_low' Operator *string `json:"operator,omitempty" db:"operator"` // eq, ne, gt, lt, gte, lte, contains Operand string `json:"operand" db:"operand"` @@ -36,12 +35,47 @@ type EventRule struct { UpdatedAt time.Time `json:"updated_at" db:"updated_at"` } +// EventRuleRequest represents a request to create or update an event rule +type EventRuleRequest struct { + DeviceID *string `json:"device_id,omitempty" validate:"omitempty,uuid"` + RuleKey *string `json:"rule_key,omitempty" validate:"required"` + Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte contains"` + Operand string `json:"operand" validate:"required"` + Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` + IsActive *bool `json:"is_active,omitempty"` + StartTime *string `json:"start_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` + EndTime *string `json:"end_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` +} + +// EventRuleResponse represents an event rule response +type EventRuleResponse struct { + EventRuleID string `json:"event_rule_id"` + DeviceID *string `json:"device_id,omitempty"` + RuleKey *string `json:"rule_key,omitempty"` + Operator *string `json:"operator,omitempty"` + Operand string `json:"operand"` + Status *string `json:"status,omitempty"` + IsActive *bool `json:"is_active,omitempty"` + StartTime *time.Time `json:"start_time,omitempty"` + EndTime *time.Time `json:"end_time,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// EventRulesListResponse represents a paginated list of event rules +type EventRulesListResponse struct { + Rules []EventRule `json:"rules"` + TotalCount int `json:"total_count"` + 
Page int `json:"page"` + PageSize int `json:"page_size"` +} + // Event represents an event occurrence type Event struct { EventID int64 `json:"event_id" db:"event_id"` EventTypeID int `json:"event_type_id" db:"event_type_id"` DataID *int64 `json:"data_id,omitempty" db:"data_id"` - EventLevel *string `json:"event_level,omitempty" db:"event_level"` // manufacturer, system, user + EventLevel *string `json:"event_level,omitempty" db:"event_level"` // manufacturer, system, automation EventRuleID *string `json:"event_rule_id,omitempty" db:"event_rule_id"` SpaceSlug string `json:"space_slug,omitempty" db:"space_slug"` EntityID *string `json:"entity_id,omitempty" db:"entity_id"` @@ -99,7 +133,6 @@ type StateChangeRequest struct { TimeFiredTs *int64 `json:"time_fired_ts,omitempty"` ContextID []byte `json:"context_id,omitempty"` TriggerID *string `json:"trigger_id,omitempty"` // for future automations reference - AllowNewEvent *bool `json:"allow_new_event,omitempty"` // flag to control duplicate event creation } // StateHistoryResponse represents historical state data for an entity @@ -141,7 +174,6 @@ type EventDetail struct { EntityID *string `json:"entity_id,omitempty"` StateID *int64 `json:"state_id,omitempty"` TriggerID *string `json:"trigger_id,omitempty"` - AllowNewEvent *bool `json:"allow_new_event,omitempty"` TimeFired time.Time `json:"time_fired"` EventData map[string]interface{} `json:"event_data,omitempty"` ContextID []byte `json:"context_id,omitempty"` @@ -215,39 +247,17 @@ func (e *EventData) SetSharedData(data map[string]interface{}) error { return nil } -// EventRuleRequest represents a request to create or update an event rule -type EventRuleRequest struct { - EntityID *string `json:"entity_id,omitempty" validate:"omitempty,uuid"` - DeviceModelID *string `json:"device_model_id,omitempty" validate:"omitempty,uuid"` - RuleKey *string `json:"rule_key,omitempty"` - Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte 
contains"` - Operand string `json:"operand" validate:"required"` - Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` - IsActive *bool `json:"is_active,omitempty"` - StartTime *string `json:"start_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` - EndTime *string `json:"end_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` -} - -// EventRuleResponse represents an event rule response -type EventRuleResponse struct { - EventRuleID string `json:"event_rule_id"` - EntityID *string `json:"entity_id,omitempty"` - DeviceModelID *string `json:"device_model_id,omitempty"` - RuleKey *string `json:"rule_key,omitempty"` - Operator *string `json:"operator,omitempty"` - Operand string `json:"operand"` - Status *string `json:"status,omitempty"` - IsActive *bool `json:"is_active,omitempty"` - StartTime *time.Time `json:"start_time,omitempty"` - EndTime *time.Time `json:"end_time,omitempty"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// EventRulesListResponse represents paginated event rules -type EventRulesListResponse struct { - Rules []EventRule `json:"rules"` - TotalCount int `json:"total_count"` - Page int `json:"page"` - PageSize int `json:"page_size"` +// MatchedEvent represents an event rule that matched evaluation +type MatchedEvent struct { + EntityID string `json:"entity_id"` + EntityType string `json:"entity_type"` + RuleKey string `json:"rule_key"` + EventType string `json:"event_type"` + EventLevel string `json:"event_level"` + Description string `json:"description"` + Value float64 `json:"value"` + Threshold float64 `json:"threshold"` + Operator string `json:"operator"` + RuleSource string `json:"rule_source"` // "default" or "automation" + Timestamp int64 `json:"timestamp"` // Unix timestamp in milliseconds } diff --git a/internal/services/processor.go b/internal/services/processor.go index a6ea578..652cbb2 100644 --- 
a/internal/services/processor.go +++ b/internal/services/processor.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "sync/atomic" + "time" + "github.com/Space-DF/telemetry-service/internal/events/registry" "github.com/Space-DF/telemetry-service/internal/models" timescaledb "github.com/Space-DF/telemetry-service/internal/timescaledb" "go.uber.org/zap" @@ -12,8 +14,9 @@ import ( // LocationProcessor processes device location messages and stores them in Psql type LocationProcessor struct { - tsClient *timescaledb.Client - logger *zap.Logger + tsClient *timescaledb.Client + ruleRegistry *registry.RuleRegistry + logger *zap.Logger // Counters for monitoring processedCount atomic.Int64 @@ -22,10 +25,11 @@ type LocationProcessor struct { } // NewLocationProcessor creates a new location processor -func NewLocationProcessor(tsClient *timescaledb.Client, logger *zap.Logger) *LocationProcessor { +func NewLocationProcessor(tsClient *timescaledb.Client, ruleRegistry *registry.RuleRegistry, logger *zap.Logger) *LocationProcessor { return &LocationProcessor{ - tsClient: tsClient, - logger: logger, + tsClient: tsClient, + ruleRegistry: ruleRegistry, + logger: logger, } } @@ -147,6 +151,37 @@ func (p *LocationProcessor) ProcessTelemetry(ctx context.Context, payload *model return err } + // Evaluate rules and create events for matched rules + if p.ruleRegistry != nil { + matchedEvents := p.ruleRegistry.Evaluate(ctx, + payload.DeviceID, + payload.DeviceInfo.Manufacturer, + payload.DeviceInfo.Model, + payload.Entities) + + for _, event := range matchedEvents { + // Set timestamp to current time if not set + if event.Timestamp == 0 { + event.Timestamp = time.Now().UnixMilli() + } + if err := p.tsClient.CreateEvent(ctx, payload.Organization, &event, payload.SpaceSlug); err != nil { + p.logger.Error("Failed to create event", + zap.Error(err), + zap.String("entity_id", event.EntityID), + zap.String("rule_key", event.RuleKey)) + } else { + p.logger.Info("Event created from rule match", + 
zap.String("entity_id", event.EntityID), + zap.String("rule_key", event.RuleKey), + zap.String("event_type", event.EventType), + zap.String("rule_source", event.RuleSource), + zap.String("event_level", event.EventLevel), + zap.Float64("value", event.Value), + zap.Float64("threshold", event.Threshold)) + } + } + } + return nil } diff --git a/internal/timescaledb/events.go b/internal/timescaledb/events.go index 67eabcb..bded5ba 100644 --- a/internal/timescaledb/events.go +++ b/internal/timescaledb/events.go @@ -4,7 +4,6 @@ import ( "context" "database/sql" "fmt" - "log" "time" "github.com/Space-DF/telemetry-service/internal/models" @@ -113,11 +112,8 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li // populateEventRuleResponse populates an EventRuleResponse from request data and times func populateEventRuleResponse(result *models.EventRuleResponse, req *models.EventRuleRequest, startTime, endTime *time.Time) { - if req.EntityID != nil { - result.EntityID = req.EntityID - } - if req.DeviceModelID != nil { - result.DeviceModelID = req.DeviceModelID + if req.DeviceID != nil { + result.DeviceID = req.DeviceID } if req.RuleKey != nil { result.RuleKey = req.RuleKey @@ -137,7 +133,7 @@ func populateEventRuleResponse(result *models.EventRuleResponse, req *models.Eve } // GetEventRules retrieves event rules with pagination -func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageSize int) ([]models.EventRule, int, error) { +func (c *Client) GetEventRules(ctx context.Context, deviceID string, page, pageSize int) ([]models.EventRule, int, error) { if page <= 0 { page = DefaultPage } @@ -150,15 +146,20 @@ func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageS var rules []models.EventRule var total int - err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + org := orgFromContext(ctx) + if org == "" { + return nil, 0, fmt.Errorf("organization not found in context") + } + + err 
:= c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { // Count total countQuery := `SELECT COUNT(*) FROM event_rules` args := []interface{}{} whereClause := "" - if entityID != "" { - whereClause = " WHERE entity_id = $1" - args = append(args, entityID) + if deviceID != "" { + whereClause = " WHERE device_id = $1" + args = append(args, deviceID) } countQuery += whereClause @@ -169,9 +170,8 @@ func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageS // Query rules query := ` - SELECT er.event_rule_id, er.entity_id, er.device_model_id, - er.rule_key, er.operator, er.operand, er.status, er.is_active, - er.start_time, er.end_time, er.created_at, er.updated_at + SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, + er.status, er.is_active, er.start_time, er.end_time, er.created_at, er.updated_at FROM event_rules er ` + whereClause + ` ORDER BY er.created_at DESC LIMIT $` + fmt.Sprintf("%d", len(args)+1) + ` OFFSET $` + fmt.Sprintf("%d", len(args)+2) args = append(args, pageSize, offset) @@ -185,9 +185,8 @@ func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageS for rows.Next() { var r models.EventRule if err := rows.Scan( - &r.EventRuleID, &r.EntityID, &r.DeviceModelID, - &r.RuleKey, &r.Operator, &r.Operand, &r.Status, &r.IsActive, - &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, + &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, + &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, ); err != nil { return err } @@ -205,6 +204,59 @@ func (c *Client) GetEventRules(ctx context.Context, entityID string, page, pageS return rules, total, nil } +// GetActiveRulesForDevice retrieves active automation rules for a specific device +// Returns only device-specific automation rules created by users +// If no automation rules exist, the caller should fall back to default system rules +func (c *Client) GetActiveRulesForDevice(ctx 
context.Context, deviceID string) ([]models.EventRule, error) { + var rules []models.EventRule + + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + // Query automation rules for this specific device only + // Filter by time range to exclude expired rules + query := ` + SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, + er.status, er.is_active, er.start_time, er.end_time, er.created_at, er.updated_at + FROM event_rules er + WHERE er.is_active = true + AND er.device_id = $1 + AND (er.start_time IS NULL OR er.start_time <= NOW()) + AND (er.end_time IS NULL OR er.end_time > NOW()) + ORDER BY er.created_at DESC + ` + + rows, err := tx.QueryContext(txCtx, query, deviceID) + if err != nil { + return fmt.Errorf("failed to query event rules: %w", err) + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var r models.EventRule + if err := rows.Scan( + &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, + &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, + ); err != nil { + return err + } + + rules = append(rules, r) + } + + return rows.Err() + }) + + if err != nil { + return nil, err + } + + return rules, nil +} + // CreateEventRule creates a new event rule func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleRequest) (*models.EventRuleResponse, error) { if req == nil { @@ -213,7 +265,12 @@ func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleReque var result models.EventRuleResponse - err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { // Parse start and end times var startTime, endTime *time.Time 
if req.StartTime != nil { @@ -233,10 +290,10 @@ func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleReque // Insert event rule err := tx.QueryRowContext(txCtx, ` - INSERT INTO event_rules (entity_id, device_model_id, rule_key, operator, operand, status, is_active, start_time, end_time) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + INSERT INTO event_rules (device_id, rule_key, operator, operand, status, is_active, start_time, end_time) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING event_rule_id, created_at, updated_at - `, req.EntityID, req.DeviceModelID, req.RuleKey, req.Operator, req.Operand, + `, req.DeviceID, req.RuleKey, req.Operator, req.Operand, req.Status, req.IsActive, startTime, endTime).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) @@ -267,7 +324,12 @@ func (c *Client) UpdateEventRule(ctx context.Context, ruleID string, req *models var result models.EventRuleResponse - err := c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + org := orgFromContext(ctx) + if org == "" { + return nil, fmt.Errorf("organization not found in context") + } + + err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { // Parse start and end times var startTime, endTime *time.Time if req.StartTime != nil { @@ -288,12 +350,11 @@ func (c *Client) UpdateEventRule(ctx context.Context, ruleID string, req *models // Update event rule err := tx.QueryRowContext(txCtx, ` UPDATE event_rules - SET entity_id = $1, device_model_id = $2, rule_key = $3, - operator = $4, operand = $5, status = $6, is_active = $7, - start_time = $8, end_time = $9, updated_at = NOW() - WHERE event_rule_id = $10 + SET device_id = $1, rule_key = $2, operator = $3, operand = $4, + status = $5, is_active = $6, start_time = $7, end_time = $8, updated_at = NOW() + WHERE event_rule_id = $9 RETURNING event_rule_id, created_at, updated_at - `, req.EntityID, req.DeviceModelID, req.RuleKey, req.Operator, req.Operand, + `, req.DeviceID, 
req.RuleKey, req.Operator, req.Operand, req.Status, req.IsActive, startTime, endTime, ruleID).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) @@ -319,7 +380,12 @@ func (c *Client) DeleteEventRule(ctx context.Context, ruleID string) error { return fmt.Errorf("rule_id is required") } - return c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { + org := orgFromContext(ctx) + if org == "" { + return fmt.Errorf("organization not found in context") + } + + return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { result, err := tx.ExecContext(txCtx, `DELETE FROM event_rules WHERE event_rule_id = $1`, ruleID) if err != nil { return fmt.Errorf("failed to delete event rule: %w", err) @@ -334,153 +400,50 @@ func (c *Client) DeleteEventRule(ctx context.Context, ruleID string) error { }) } -// ruleConfig represents a single event rule configuration for seeding -type ruleConfig struct { - RuleKey string - EntityIDPattern string - Operator string - Operand string - EventType string - EventLevel string - Description string - Status string - IsActive bool -} - -// deviceModelRules represents event rules for a specific device model -type deviceModelRules struct { - DeviceModel string - DeviceModelID string - Rules []ruleConfig -} - -// SeedDefaultEventRules seeds default event rules from configuration -// This is typically called on service startup to ensure default rules exist -func (c *Client) SeedDefaultEventRules(ctx context.Context, rulesConfig interface{}) error { - var config struct { - DeviceModels []deviceModelRules +// CreateEvent creates a new event from a matched event rule +func (c *Client) CreateEvent(ctx context.Context, org string, event *models.MatchedEvent, spaceSlug string) error { + if event == nil { + return fmt.Errorf("nil event") } - - // Use type assertion to handle different config types - // This allows passing either the raw YAML unmarshalled struct or our custom config type - switch cfg := 
rulesConfig.(type) { - case map[string]interface{}: - // Handle raw YAML map - if dms, ok := cfg["device_models"].([]interface{}); ok { - for _, dm := range dms { - dmMap, ok := dm.(map[string]interface{}) - if !ok { - continue - } - dmr := deviceModelRules{ - DeviceModel: getString(dmMap, "device_model"), - DeviceModelID: getString(dmMap, "device_model_id"), - } - if rules, ok := dmMap["rules"].([]interface{}); ok { - for _, r := range rules { - rMap, ok := r.(map[string]interface{}) - if !ok { - continue - } - dmr.Rules = append(dmr.Rules, ruleConfig{ - RuleKey: getString(rMap, "rule_key"), - EntityIDPattern: getString(rMap, "entity_id_pattern"), - Operator: getString(rMap, "operator"), - Operand: getString(rMap, "operand"), - EventType: getString(rMap, "event_type"), - EventLevel: getString(rMap, "event_level"), - Description: getString(rMap, "description"), - Status: getString(rMap, "status"), - IsActive: getBool(rMap, "is_active"), - }) - } - } - config.DeviceModels = append(config.DeviceModels, dmr) - } - } - default: - return fmt.Errorf("unsupported config type: %T", rulesConfig) + if org == "" { + return fmt.Errorf("organization is required") } - // Seed rules for each device model - for _, dm := range config.DeviceModels { - if err := c.seedDeviceModelRules(ctx, dm); err != nil { - return fmt.Errorf("failed to seed rules for device model %s: %w", dm.DeviceModel, err) + return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { + // Step 1: Get or create event_type + var eventTypeID int + err := tx.QueryRowContext(txCtx, ` + SELECT event_type_id FROM event_types WHERE event_type = $1 + `, event.EventType).Scan(&eventTypeID) + + if err == sql.ErrNoRows { + // Create new event_type + err = tx.QueryRowContext(txCtx, ` + INSERT INTO event_types (event_type) VALUES ($1) + RETURNING event_type_id + `, event.EventType).Scan(&eventTypeID) } - } - return nil -} - -// seedDeviceModelRules seeds event rules for a specific device model -func (c 
*Client) seedDeviceModelRules(ctx context.Context, dm deviceModelRules) error { - return c.WithOrgTx(ctx, "", func(txCtx context.Context, tx bob.Tx) error { - for _, ruleCfg := range dm.Rules { - // Check if rule already exists (by device_model_id + rule_key + operator + operand) - var existingRuleID string - checkQuery := ` - SELECT event_rule_id FROM event_rules - WHERE device_model_id = $1 AND rule_key = $2 AND operator = $3 AND operand = $4 - LIMIT 1 - ` - err := tx.QueryRowContext(txCtx, checkQuery, dm.DeviceModelID, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand).Scan(&existingRuleID) - - if err == nil { - // Rule already exists, skip - log.Printf("[EventRules] Rule already exists for %s:%s (%s %s), skipping", dm.DeviceModel, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand) - continue - } + if err != nil { + return fmt.Errorf("failed to get/create event_type: %w", err) + } - if err != sql.ErrNoRows { - return fmt.Errorf("failed to check existing rule: %w", err) - } + // Step 2: Create event_data with the event information + dataID := sql.NullInt64{Valid: false} - // Create new rule - var ruleID string - var entityID *string - if ruleCfg.EntityIDPattern != "" { - entityID = &ruleCfg.EntityIDPattern - } - ruleKey := &ruleCfg.RuleKey - operator := &ruleCfg.Operator - status := &ruleCfg.Status - isActive := &ruleCfg.IsActive - - insertQuery := ` - INSERT INTO event_rules (device_model_id, entity_id, rule_key, operator, operand, status, is_active) - VALUES ($1, $2, $3, $4, $5, $6, $7) - RETURNING event_rule_id - ` - err = tx.QueryRowContext(txCtx, insertQuery, - dm.DeviceModelID, entityID, ruleKey, operator, ruleCfg.Operand, status, isActive, - ).Scan(&ruleID) - - if err != nil { - return fmt.Errorf("failed to insert event rule: %w", err) - } + // Step 3: Create the event + _, err = tx.ExecContext(txCtx, ` + INSERT INTO events ( + event_type_id, data_id, event_level, event_rule_id, + space_slug, entity_id, time_fired_ts + ) VALUES ($1, $2, $3, $4, $5, $6, 
$7) + `, eventTypeID, dataID, event.EventLevel, nil, spaceSlug, event.EntityID, event.Timestamp) - log.Printf("[EventRules] Seeded rule: %s - %s %s %s", dm.DeviceModel, ruleCfg.RuleKey, ruleCfg.Operator, ruleCfg.Operand) + if err != nil { + return fmt.Errorf("failed to create event: %w", err) } + return nil }) } -// getString safely extracts a string value from a map -func getString(m map[string]interface{}, key string) string { - if v, ok := m[key]; ok { - if s, ok := v.(string); ok { - return s - } - } - return "" -} - -// getBool safely extracts a bool value from a map -func getBool(m map[string]interface{}, key string) bool { - if v, ok := m[key]; ok { - if b, ok := v.(bool); ok { - return b - } - } - return false -} diff --git a/pkgs/db/migrations/20251225000000_create_events_schema.sql b/pkgs/db/migrations/20251225000000_create_events_schema.sql index 897ead9..b6931ed 100644 --- a/pkgs/db/migrations/20251225000000_create_events_schema.sql +++ b/pkgs/db/migrations/20251225000000_create_events_schema.sql @@ -27,24 +27,27 @@ CREATE INDEX IF NOT EXISTS idx_event_data_hash ON event_data (hash); -- Event Rules: Rules for triggering events based on conditions CREATE TABLE IF NOT EXISTS event_rules ( event_rule_id UUID PRIMARY KEY DEFAULT gen_random_uuid(), - entity_id UUID REFERENCES entities(id) ON DELETE SET NULL, - device_model_id UUID, - rule_key TEXT, -- e.g., 'battery_low', 'temperature_low' + device_id UUID, -- Device-specific automation rules + rule_key TEXT NOT NULL, -- e.g., 'battery_low', 'temperature_low' operator VARCHAR(16) CHECK (operator IN ('eq', 'ne', 'gt', 'lt', 'gte', 'lte', 'contains')), operand TEXT NOT NULL, status VARCHAR(16) CHECK (status IN ('active', 'inactive', 'paused')) DEFAULT 'active', is_active BOOLEAN DEFAULT true, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, - allow_new_event BOOLEAN, -- flag to control duplicate event creation created_at TIMESTAMPTZ NOT NULL DEFAULT now(), updated_at TIMESTAMPTZ NOT NULL DEFAULT now() ); -CREATE 
INDEX IF NOT EXISTS idx_event_rules_entity_id ON event_rules (entity_id); -CREATE INDEX IF NOT EXISTS idx_event_rules_device_model_id ON event_rules (device_model_id); +-- Indexes for event_rules +CREATE INDEX IF NOT EXISTS idx_event_rules_device_id ON event_rules (device_id); CREATE INDEX IF NOT EXISTS idx_event_rules_status ON event_rules (status); CREATE INDEX IF NOT EXISTS idx_event_rules_is_active ON event_rules (is_active); +-- Composite index for active device rules query (performance optimization) +CREATE INDEX IF NOT EXISTS idx_event_rules_active_device ON event_rules (is_active, device_id, created_at DESC) +WHERE is_active = true; +CREATE INDEX IF NOT EXISTS idx_event_rules_time_range ON event_rules (start_time, end_time) +WHERE is_active = true; -- Events: Event occurrences linking to event_type and event_data -- space_slug is stored here for filtering events within a space @@ -53,13 +56,15 @@ CREATE TABLE IF NOT EXISTS events ( event_id BIGSERIAL PRIMARY KEY, event_type_id INTEGER NOT NULL REFERENCES event_types(event_type_id) ON DELETE CASCADE, data_id INTEGER REFERENCES event_data(data_id) ON DELETE SET NULL, - event_level TEXT CHECK (event_level IN ('manufacturer', 'system', 'user')), + event_level TEXT CHECK (event_level IN ('manufacturer', 'system', 'automation')), event_rule_id UUID REFERENCES event_rules(event_rule_id) ON DELETE SET NULL, space_slug TEXT, - entity_id UUID REFERENCES entities(id) ON DELETE SET NULL, - state_id BIGINT REFERENCES entity_states(state_id) ON DELETE SET NULL, + entity_id TEXT, + device_model_id TEXT, + state_id UUID REFERENCES entity_states(id) ON DELETE SET NULL, context_id_bin BYTEA, trigger_id UUID, -- for future automations table reference + allow_new_event BOOLEAN DEFAULT true, time_fired_ts BIGINT NOT NULL, created_at TIMESTAMPTZ NOT NULL DEFAULT now() ); @@ -68,6 +73,7 @@ CREATE INDEX IF NOT EXISTS idx_events_event_type_id ON events (event_type_id); CREATE INDEX IF NOT EXISTS idx_events_event_rule_id ON events 
(event_rule_id); CREATE INDEX IF NOT EXISTS idx_events_space_slug ON events (space_slug); CREATE INDEX IF NOT EXISTS idx_events_entity_id ON events (entity_id); +CREATE INDEX IF NOT EXISTS idx_events_device_model_id ON events (device_model_id); CREATE INDEX IF NOT EXISTS idx_events_state_id ON events (state_id); CREATE INDEX IF NOT EXISTS idx_events_trigger_id ON events (trigger_id); CREATE INDEX IF NOT EXISTS idx_events_time_fired_ts ON events (time_fired_ts DESC); From cdcb9b49af9a96897eefc91b065d49c4453d3992 Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Mon, 2 Feb 2026 17:51:39 +0700 Subject: [PATCH 06/10] fix: ci issues --- internal/events/loader/system_rules_loader.go | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/internal/events/loader/system_rules_loader.go b/internal/events/loader/system_rules_loader.go index 7f6be2a..8d766e0 100644 --- a/internal/events/loader/system_rules_loader.go +++ b/internal/events/loader/system_rules_loader.go @@ -6,6 +6,8 @@ import ( "path/filepath" "strings" + "golang.org/x/text/cases" + "golang.org/x/text/language" "gopkg.in/yaml.v3" ) @@ -80,7 +82,24 @@ func LoadSystemDefaultRules(dir string) (map[string]*DeviceModelRules, error) { // loadDeviceModelRules loads rules from a single YAML file func loadDeviceModelRules(filePath, baseDir string) (*DeviceModelRules, error) { - data, err := os.ReadFile(filePath) + absBaseDir, err := filepath.Abs(baseDir) + if err != nil { + return nil, fmt.Errorf("failed to resolve base directory: %w", err) + } + + absFilePath, err := filepath.Abs(filePath) + if err != nil { + return nil, fmt.Errorf("failed to resolve file path: %w", err) + } + + // Check if the file path is within the base directory + relPath, err := filepath.Rel(absBaseDir, absFilePath) + if err != nil || strings.HasPrefix(relPath, "..") { + return nil, fmt.Errorf("file path is outside base directory: %s", filePath) + } + + // #nosec G304 -- Path is validated above to ensure it's 
within baseDir + data, err := os.ReadFile(absFilePath) if err != nil { return nil, fmt.Errorf("failed to read file: %w", err) } @@ -100,7 +119,8 @@ func loadDeviceModelRules(filePath, baseDir string) (*DeviceModelRules, error) { // Set defaults if rules.DisplayName == "" { - rules.DisplayName = fmt.Sprintf("%s %s Rules", strings.Title(rules.Brand), strings.ToUpper(rules.Model)) + caser := cases.Title(language.English) + rules.DisplayName = fmt.Sprintf("%s %s Rules", caser.String(rules.Brand), strings.ToUpper(rules.Model)) } for i := range rules.Rules { From cbc6ff835a64f8a94a389172490484dcb0ba0fcd Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Tue, 3 Feb 2026 15:44:31 +0700 Subject: [PATCH 07/10] fix: resolve comments which are removing unnecessary functions, variables --- configs/event_rules/rakwireless/rak4630.yaml | 21 ++- internal/alerts/registry/loader.go | 16 --- internal/api/entities/handler.go | 54 -------- internal/api/entities/router.go | 3 - internal/api/events/handler.go | 15 ++- internal/config/event_rules.go | 53 +------- internal/events/evaluator/evaluator.go | 122 ++++++++---------- internal/events/helpers.go | 60 +++++++++ internal/events/loader/system_rules_loader.go | 16 +-- internal/models/events.go | 19 +-- internal/timescaledb/attributes.go | 58 --------- internal/timescaledb/events.go | 68 ++++++---- internal/timescaledb/telemetry.go | 33 ----- .../20251225000000_create_events_schema.sql | 4 +- 14 files changed, 188 insertions(+), 354 deletions(-) create mode 100644 internal/events/helpers.go diff --git a/configs/event_rules/rakwireless/rak4630.yaml b/configs/event_rules/rakwireless/rak4630.yaml index e040cff..ebc881d 100644 --- a/configs/event_rules/rakwireless/rak4630.yaml +++ b/configs/event_rules/rakwireless/rak4630.yaml @@ -1,4 +1,5 @@ # RAK4630 Event Rules +brand: "rakwireless" model: "rak4630" model_id: "" # Will be resolved from device service display_name: "RAK4630 Event Rules" @@ -6,66 +7,60 @@ display_name: "RAK4630 Event 
Rules" rules: # Battery Low Warning - rule_key: "battery_v" - entity_id_pattern: "" operator: "lt" operand: "3.3" event_type: "device_event" event_level: "system" description: "Battery voltage is low (< 3.3V)" - status: "active" + allow_new_event: true is_active: true # Battery Critical Warning - rule_key: "battery_v" - entity_id_pattern: "" operator: "lt" operand: "3.0" event_type: "device_event" event_level: "system" description: "Battery voltage is critically low (< 3.0V)" - status: "active" + allow_new_event: true is_active: true # High Temperature Warning - rule_key: "temperature" - entity_id_pattern: "" operator: "gt" operand: "50" event_type: "device_event" event_level: "system" description: "Temperature is high (> 50°C)" - status: "active" + allow_new_event: true is_active: true # Low Temperature Warning - rule_key: "temperature" - entity_id_pattern: "" operator: "lt" - operand: "-10" + oper`nd: "-10" event_type: "device_event" event_level: "system" description: "Temperature is very low (< -10°C)" - status: "active" + allow_new_event: true is_active: true # High Humidity Warning - rule_key: "humidity" - entity_id_pattern: "" operator: "gt" operand: "90" event_type: "device_event" event_level: "system" description: "Humidity is very high (> 90%)" - status: "active" + allow_new_event: true is_active: true # Low Humidity Warning - rule_key: "humidity" - entity_id_pattern: "" operator: "lt" operand: "10" event_type: "device_event" event_level: "system" description: "Humidity is very low (< 10%)" - status: "active" + allow_new_event: true is_active: true diff --git a/internal/alerts/registry/loader.go b/internal/alerts/registry/loader.go index 4d96be0..42b4ca1 100644 --- a/internal/alerts/registry/loader.go +++ b/internal/alerts/registry/loader.go @@ -63,19 +63,3 @@ func LoadFromConfig(path string) (map[string]Processor, error) { return result, nil } - -// RegisterFromConfig merges processors from YAML into the global registry, overriding existing categories. 
-func RegisterFromConfig(path string) error { - processors, err := LoadFromConfig(path) - if err != nil { - return err - } - - globalRegistry.mu.Lock() - defer globalRegistry.mu.Unlock() - - for category, processor := range processors { - globalRegistry.processors[category] = processor - } - return nil -} diff --git a/internal/api/entities/handler.go b/internal/api/entities/handler.go index 05fd42d..cc4af1a 100644 --- a/internal/api/entities/handler.go +++ b/internal/api/entities/handler.go @@ -11,11 +11,6 @@ import ( "go.uber.org/zap" ) -// updateDeviceTriggerEventRequest represents the request to update an device's trigger event type -type updateDeviceTriggerEventRequest struct { - TriggerEventType string `json:"trigger_event_type"` -} - func getEntities(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { return func(c echo.Context) error { // Parse query params @@ -92,52 +87,3 @@ func parseDisplayTypes(param string) []string { } return parts[:j] } - -// updateDeviceTriggerEvent updates the trigger event type for an device -func updateDeviceTriggerEvent(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { - return func(c echo.Context) error { - deviceID := strings.TrimSpace(c.Param("device_id")) - if deviceID == "" { - return c.JSON(http.StatusBadRequest, map[string]string{ - "error": "device_id is required", - }) - } - - var req updateDeviceTriggerEventRequest - if err := c.Bind(&req); err != nil { - return c.JSON(http.StatusBadRequest, map[string]string{ - "error": "invalid request body", - }) - } - - if req.TriggerEventType == "" { - return c.JSON(http.StatusBadRequest, map[string]string{ - "error": "trigger_event_type is required", - }) - } - - orgToUse := common.ResolveOrgFromRequest(c) - if orgToUse == "" { - return c.JSON(http.StatusBadRequest, map[string]string{ - "error": "Could not determine organization from hostname or X-Organization header", - }) - } - - ctx := timescaledb.ContextWithOrg(c.Request().Context(), 
orgToUse) - err := tsClient.UpdateDeviceTriggerEventType(ctx, deviceID, req.TriggerEventType) - if err != nil { - logger.Error("failed to update device trigger event type", - zap.String("device_id", deviceID), - zap.String("trigger_event_type", req.TriggerEventType), - zap.Error(err)) - return c.JSON(http.StatusInternalServerError, map[string]string{ - "error": "failed to update device trigger event type", - }) - } - - return c.JSON(http.StatusOK, map[string]interface{}{ - "device_id": deviceID, - "trigger_event_type": req.TriggerEventType, - }) - } -} diff --git a/internal/api/entities/router.go b/internal/api/entities/router.go index a51b442..dea8f01 100644 --- a/internal/api/entities/router.go +++ b/internal/api/entities/router.go @@ -9,7 +9,4 @@ import ( func RegisterRoutes(e *echo.Group, logger *zap.Logger, tsClient *timescaledb.Client) { group := e.Group("/entities") group.GET("", getEntities(logger, tsClient)) - - // Update device trigger event configuration - group.PUT("/:device_id/trigger-event", updateDeviceTriggerEvent(logger, tsClient)) } diff --git a/internal/api/events/handler.go b/internal/api/events/handler.go index 3e7e8ed..b46acbd 100644 --- a/internal/api/events/handler.go +++ b/internal/api/events/handler.go @@ -40,8 +40,21 @@ func getEventsByDevice(logger *zap.Logger, tsClient *timescaledb.Client) echo.Ha } } + // Parse start_time and end_time query parameters + var startTime, endTime *int64 + if startTimeStr := c.QueryParam("start_time"); startTimeStr != "" { + if ms, err := strconv.ParseInt(startTimeStr, 10, 64); err == nil { + startTime = &ms + } + } + if endTimeStr := c.QueryParam("end_time"); endTimeStr != "" { + if ms, err := strconv.ParseInt(endTimeStr, 10, 64); err == nil { + endTime = &ms + } + } + ctx := timescaledb.ContextWithOrg(c.Request().Context(), orgToUse) - events, err := tsClient.GetEventsByDevice(ctx, orgToUse, deviceID, limit) + events, err := tsClient.GetEventsByDevice(ctx, orgToUse, deviceID, limit, startTime, endTime) if 
err != nil { logger.Error("failed to get events by device", zap.String("device_id", deviceID), diff --git a/internal/config/event_rules.go b/internal/config/event_rules.go index 0530748..a272ace 100644 --- a/internal/config/event_rules.go +++ b/internal/config/event_rules.go @@ -25,7 +25,6 @@ type EventRuleConfig struct { // DeviceModelRules represents event rules for a specific device model type DeviceModelRules struct { DeviceModel string `yaml:"device_model"` - DeviceModelID string `yaml:"device_model_id"` DisplayName string `yaml:"display_name"` Rules []EventRuleConfig `yaml:"rules"` } @@ -155,54 +154,4 @@ func isPathAllowed(path string) bool { } } return false -} - -// GetRulesForDeviceModel returns all rules for a specific device model -func (c *EventRulesConfig) GetRulesForDeviceModel(deviceModel string) []EventRuleConfig { - for _, dm := range c.DeviceModels { - if dm.DeviceModel == deviceModel { - return dm.Rules - } - } - return nil -} - -// GetDeviceModelRules returns the DeviceModelRules for a specific device model -func (c *EventRulesConfig) GetDeviceModelRules(deviceModel string) *DeviceModelRules { - for i := range c.DeviceModels { - if c.DeviceModels[i].DeviceModel == deviceModel { - return &c.DeviceModels[i] - } - } - return nil -} - -// ToRawMap converts the EventRulesConfig to a map[string]interface{} for use with SeedDefaultEventRules -func (c *EventRulesConfig) ToRawMap() map[string]interface{} { - deviceModels := make([]interface{}, 0, len(c.DeviceModels)) - for _, dm := range c.DeviceModels { - rules := make([]interface{}, 0, len(dm.Rules)) - for _, r := range dm.Rules { - rules = append(rules, map[string]interface{}{ - "rule_key": r.RuleKey, - "entity_id_pattern": r.EntityIDPattern, - "operator": r.Operator, - "operand": r.Operand, - "event_type": r.EventType, - "event_level": r.EventLevel, - "description": r.Description, - "status": r.Status, - "is_active": r.IsActive, - }) - } - deviceModels = append(deviceModels, map[string]interface{}{ - 
"device_model": dm.DeviceModel, - "device_model_id": dm.DeviceModelID, - "display_name": dm.DisplayName, - "rules": rules, - }) - } - return map[string]interface{}{ - "device_models": deviceModels, - } -} +} \ No newline at end of file diff --git a/internal/events/evaluator/evaluator.go b/internal/events/evaluator/evaluator.go index 7aee318..956d995 100644 --- a/internal/events/evaluator/evaluator.go +++ b/internal/events/evaluator/evaluator.go @@ -2,9 +2,10 @@ package evaluator import ( "fmt" - "strconv" "strings" + "time" + "github.com/Space-DF/telemetry-service/internal/events" "github.com/Space-DF/telemetry-service/internal/events/loader" "github.com/Space-DF/telemetry-service/internal/models" "go.uber.org/zap" @@ -25,13 +26,18 @@ func NewEvaluator(logger *zap.Logger) *Evaluator { // EvaluateRule evaluates a single YAML rule against an entity func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEntity) *models.MatchedEvent { // Skip inactive rules - if !rule.IsActive || rule.Status != "active" { + if !rule.IsActive { return nil } - // Check if rule applies to this entity (if entity_id_pattern is specified) + // Check if rule allows creating new events + if !rule.AllowNewEvent { + return nil + } + + // Check if rule applies to this entity if rule.EntityIDPattern != "" { - if !contains(entity.EntityID, rule.EntityIDPattern) && entity.EntityID != rule.EntityIDPattern { + if !events.Contains(entity.EntityID, rule.EntityIDPattern) && entity.EntityID != rule.EntityIDPattern { return nil } } @@ -43,22 +49,21 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEn } // Parse the operand as float64 - operand, err := strconv.ParseFloat(rule.Operand, 64) - if err != nil { + operand, ok := events.ParseFloat64(rule.Operand) + if !ok { e.logger.Warn("Failed to parse operand as float64", zap.String("rule_key", rule.RuleKey), - zap.String("operand", rule.Operand), - zap.Error(err)) + zap.String("operand", rule.Operand)) 
return nil } // Evaluate the condition - matched := e.compareValues(value, operand, rule.Operator) + matched := events.CompareValues(value, operand, rule.Operator, e.logger) if !matched { return nil } - return &models.MatchedEvent{ + matchedEvent := &models.MatchedEvent{ EntityID: entity.EntityID, EntityType: entity.EntityType, RuleKey: rule.RuleKey, @@ -69,7 +74,10 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEn Threshold: operand, Operator: rule.Operator, RuleSource: "default", + Timestamp: time.Now().UnixMilli(), } + + return matchedEvent } // EvaluateRuleDB evaluates a database rule against an entity @@ -82,6 +90,11 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr return nil } + // Check if rule allows creating new events + if rule.AllowNewEvent != nil && !*rule.AllowNewEvent { + return nil + } + // Get rule key from database rule ruleKey := "" if rule.RuleKey != nil { @@ -98,12 +111,11 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr } // Parse the operand as float64 - operand, err := strconv.ParseFloat(rule.Operand, 64) - if err != nil { + operand, ok := events.ParseFloat64(rule.Operand) + if !ok { e.logger.Warn("Failed to parse operand as float64", zap.String("rule_key", ruleKey), - zap.String("operand", rule.Operand), - zap.Error(err)) + zap.String("operand", rule.Operand)) return nil } @@ -117,7 +129,7 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr } // Evaluate the condition - matched := e.compareValues(value, operand, operator) + matched := events.CompareValues(value, operand, operator, e.logger) if !matched { return nil } @@ -125,7 +137,7 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr // Build description description := fmt.Sprintf("Rule %s matched: %.2f %s %.2f", ruleKey, value, operator, operand) - return &models.MatchedEvent{ + matchedEvent := &models.MatchedEvent{ 
EntityID: entity.EntityID, EntityType: entity.EntityType, RuleKey: ruleKey, @@ -136,7 +148,10 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr Threshold: operand, Operator: operator, RuleSource: "automation", + Timestamp: time.Now().UnixMilli(), } + + return matchedEvent } // getEntityValue extracts a numeric value from an entity based on the rule key @@ -144,23 +159,21 @@ func (e *Evaluator) getEntityValue(entity models.TelemetryEntity, ruleKey string // Try to get value from Attributes map first if entity.Attributes != nil { if val, ok := entity.Attributes[ruleKey]; ok { - return e.parseFloat64(val) + return events.ParseFloat64(val) } } - // Try to get value from State (which could be a map or direct value) + // Try to get value from State if entity.State != nil { switch s := entity.State.(type) { case map[string]interface{}: if val, ok := s[ruleKey]; ok { - return e.parseFloat64(val) + return events.ParseFloat64(val) } default: - // Check if rule_key matches or is a prefix of entity_type - // e.g., rule_key="battery_v" matches entity_type="battery" - // rule_key="temperature" matches entity_type="temperature" - if e.isRuleKeyRelevant(ruleKey, entity.EntityType, entity.Name) { - if val, ok := e.parseFloat64(s); ok { + // For other types, try to parse as float64 + if val, ok := events.ParseFloat64(s); ok { + if e.isRuleKeyRelevant(ruleKey, entity.EntityType, entity.Name) { return val, true } } @@ -184,55 +197,24 @@ func (e *Evaluator) isRuleKeyRelevant(ruleKey, entityType, entityName string) bo return true } - return false -} + // Check entity name for common sensor patterns + entityNameLower := strings.ToLower(entityName) + ruleKeyLower := strings.ToLower(ruleKey) -// parseFloat64 converts various types to float64 -func (e *Evaluator) parseFloat64(value interface{}) (float64, bool) { - switch v := value.(type) { - case float64: - return v, true - case float32: - return float64(v), true - case int: - return float64(v), true - 
case int64: - return float64(v), true - case int32: - return float64(v), true - case string: - f, err := strconv.ParseFloat(v, 64) - if err != nil { - return 0, false - } - return f, true - default: - return 0, false + // Direct match: "temperature" rule_key with "Temperature" entity name + if ruleKeyLower == entityNameLower { + return true } -} -// compareValues performs comparison based on operator -func (e *Evaluator) compareValues(value, threshold float64, operator string) bool { - switch operator { - case "gt": - return value > threshold - case "lt": - return value < threshold - case "gte": - return value >= threshold - case "lte": - return value <= threshold - case "eq": - return value == threshold - case "ne": - return value != threshold - default: - e.logger.Warn("Unknown operator", zap.String("operator", operator)) - return false + // Contains match: "battery_v" rule_key with "Battery Level" entity + if strings.Contains(entityNameLower, ruleKeyLower) { + return true } -} -// contains checks if the target string contains the substring (case-insensitive) -func contains(str, substr string) bool { - return strings.Contains(strings.ToLower(str), strings.ToLower(substr)) + // Prefix match: "humidity" rule_key with "Humidity Sensor" entity + if strings.HasPrefix(entityNameLower, ruleKeyLower) { + return true + } + + return false } diff --git a/internal/events/helpers.go b/internal/events/helpers.go new file mode 100644 index 0000000..f7bd1a5 --- /dev/null +++ b/internal/events/helpers.go @@ -0,0 +1,60 @@ +package events + +import ( + "strconv" + "strings" + + "go.uber.org/zap" +) + +// ParseFloat64 converts various types to float64 +func ParseFloat64(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case float32: + return float64(v), true + case int: + return float64(v), true + case int64: + return float64(v), true + case int32: + return float64(v), true + case string: + f, err := strconv.ParseFloat(v, 64) + if 
err != nil { + return 0, false + } + return f, true + default: + return 0, false + } +} + +// CompareValues performs comparison based on operator +func CompareValues(value, threshold float64, operator string, logger *zap.Logger) bool { + switch operator { + case "gt": + return value > threshold + case "lt": + return value < threshold + case "gte": + return value >= threshold + case "lte": + return value <= threshold + case "eq": + return value == threshold + case "ne": + return value != threshold + default: + if logger != nil { + logger.Warn("Unknown operator", zap.String("operator", operator)) + } + return false + } +} + +// Contains checks if the target string contains the substring (case-insensitive) +func Contains(str, substr string) bool { + return strings.Contains(strings.ToLower(str), strings.ToLower(substr)) +} diff --git a/internal/events/loader/system_rules_loader.go b/internal/events/loader/system_rules_loader.go index 8d766e0..ba68e3d 100644 --- a/internal/events/loader/system_rules_loader.go +++ b/internal/events/loader/system_rules_loader.go @@ -20,8 +20,8 @@ type YAMLRule struct { EventType string `yaml:"event_type"` EventLevel string `yaml:"event_level"` Description string `yaml:"description"` - Status string `yaml:"status"` IsActive bool `yaml:"is_active"` + AllowNewEvent bool `yaml:"allow_new_event"` } // DeviceModelRules represents event rules for a specific device model @@ -123,17 +123,5 @@ func loadDeviceModelRules(filePath, baseDir string) (*DeviceModelRules, error) { rules.DisplayName = fmt.Sprintf("%s %s Rules", caser.String(rules.Brand), strings.ToUpper(rules.Model)) } - for i := range rules.Rules { - if rules.Rules[i].Status == "" { - rules.Rules[i].Status = "active" - } - } - return &rules, nil -} - -// GetRulesForDevice retrieves rules for a specific brand/model combination -func GetRulesForDevice(loadedRules map[string]*DeviceModelRules, brand, model string) *DeviceModelRules { - key := fmt.Sprintf("%s/%s", strings.ToLower(brand), 
strings.ToLower(model)) - return loadedRules[key] -} +} \ No newline at end of file diff --git a/internal/models/events.go b/internal/models/events.go index f5b5cd1..7bc4125 100644 --- a/internal/models/events.go +++ b/internal/models/events.go @@ -31,20 +31,22 @@ type EventRule struct { IsActive *bool `json:"is_active,omitempty" db:"is_active"` StartTime *time.Time `json:"start_time,omitempty" db:"start_time"` EndTime *time.Time `json:"end_time,omitempty" db:"end_time"` + AllowNewEvent *bool `json:"allow_new_event,omitempty" db:"allow_new_event"` CreatedAt time.Time `json:"created_at" db:"created_at"` UpdatedAt time.Time `json:"updated_at" db:"updated_at"` } // EventRuleRequest represents a request to create or update an event rule type EventRuleRequest struct { - DeviceID *string `json:"device_id,omitempty" validate:"omitempty,uuid"` - RuleKey *string `json:"rule_key,omitempty" validate:"required"` - Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte contains"` - Operand string `json:"operand" validate:"required"` - Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` - IsActive *bool `json:"is_active,omitempty"` - StartTime *string `json:"start_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` - EndTime *string `json:"end_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` + DeviceID *string `json:"device_id,omitempty" validate:"required,uuid"` + RuleKey *string `json:"rule_key,omitempty" validate:"required"` + Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte contains"` + Operand string `json:"operand" validate:"required"` + Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` + IsActive *bool `json:"is_active,omitempty"` + AllowNewEvent *bool `json:"allow_new_event,omitempty"` + StartTime *string `json:"start_time,omitempty" 
validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` + EndTime *string `json:"end_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` } // EventRuleResponse represents an event rule response @@ -82,7 +84,6 @@ type Event struct { StateID *int64 `json:"state_id,omitempty" db:"state_id"` ContextID []byte `json:"context_id_bin,omitempty" db:"context_id_bin"` TriggerID *string `json:"trigger_id,omitempty" db:"trigger_id"` - AllowNewEvent *bool `json:"allow_new_event,omitempty" db:"allow_new_event"` TimeFiredTs int64 `json:"time_fired_ts" db:"time_fired_ts"` CreatedAt time.Time `json:"created_at" db:"created_at"` diff --git a/internal/timescaledb/attributes.go b/internal/timescaledb/attributes.go index 432c891..7d09374 100644 --- a/internal/timescaledb/attributes.go +++ b/internal/timescaledb/attributes.go @@ -13,64 +13,6 @@ import ( "go.uber.org/zap" ) -// GetLatestAttributesForDeviceAt returns the shared attributes JSON for the -// given device at or before the provided timestamp. If there are no -// attributes available it returns (nil, nil). 
-func (c *Client) GetLatestAttributesForDeviceAt(ctx context.Context, deviceID string, at time.Time) (map[string]interface{}, error) { - org := orgFromContext(ctx) - - query := `SELECT a.shared_attrs - FROM entities e - JOIN entity_states s ON s.entity_id = e.id - LEFT JOIN entity_state_attributes a ON s.attributes_id = a.id - WHERE e.device_id::text = $1 AND s.reported_at <= $2 AND a.shared_attrs IS NOT NULL - ORDER BY s.reported_at DESC - LIMIT 1` - - var rawAttrs []byte - if org != "" { - if err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { - rows, err := tx.QueryContext(txCtx, query, deviceID, at) - if err != nil { - return err - } - defer func() { - _ = rows.Close() - }() - if rows.Next() { - return rows.Scan(&rawAttrs) - } - return nil - }); err != nil { - return nil, fmt.Errorf("failed to query attributes: %w", err) - } - } else { - rows, err := c.DB.QueryContext(ctx, query, deviceID, at) - if err != nil { - return nil, fmt.Errorf("failed to query attributes: %w", err) - } - defer func() { - _ = rows.Close() - }() - if rows.Next() { - if err := rows.Scan(&rawAttrs); err != nil { - return nil, err - } - } - } - - if len(rawAttrs) == 0 { - return nil, nil - } - - var attrs map[string]interface{} - if err := json.Unmarshal(rawAttrs, &attrs); err != nil { - return nil, fmt.Errorf("failed to unmarshal attributes JSON: %w", err) - } - - return attrs, nil -} - type Location struct { Time time.Time DeviceID string diff --git a/internal/timescaledb/events.go b/internal/timescaledb/events.go index bded5ba..758d0b4 100644 --- a/internal/timescaledb/events.go +++ b/internal/timescaledb/events.go @@ -12,10 +12,8 @@ import ( // EventType constants const ( - EventTypeStateChanged = "state_changed" - EventTypeServiceCall = "service_call" - EventTypeAutomation = "automation_triggered" - EventTypeDeviceTriggered = "device_triggered" + EventTypeStateChanged = "state_changed" + EventTypeAutomation = "automation_triggered" ) // Pagination constants @@ 
-27,7 +25,7 @@ const ( ) // GetEventsByDevice retrieves all events for a specific entity. -func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, limit int) ([]models.Event, error) { +func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, limit int, startTime, endTime *int64) ([]models.Event, error) { if org == "" { return nil, fmt.Errorf("organization is required") } @@ -41,19 +39,37 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li var events []models.Event err := c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { - // Query events where the event_data contains this device_id - query := ` + // Build base query with device_id filter + whereClause := `ed.shared_data->>'device_id' = $1` + args := []interface{}{deviceID} + + // Add time range filters if provided + argIndex := 2 + if startTime != nil { + whereClause += fmt.Sprintf(" AND e.time_fired_ts >= $%d", argIndex) + args = append(args, *startTime) + argIndex++ + } + if endTime != nil { + whereClause += fmt.Sprintf(" AND e.time_fired_ts <= $%d", argIndex) + args = append(args, *endTime) + argIndex++ + } + args = append(args, limit) + + // Complete the query + query := fmt.Sprintf(` SELECT e.event_id, e.event_type_id, e.data_id, e.space_slug, e.context_id_bin, - e.trigger_id, e.allow_new_event, e.time_fired_ts, et.event_type, ed.shared_data + e.trigger_id, e.time_fired_ts, et.event_type, ed.shared_data FROM events e JOIN event_types et ON e.event_type_id = et.event_type_id LEFT JOIN event_data ed ON e.data_id = ed.data_id - WHERE ed.shared_data->>'device_id' = $1 + WHERE %s ORDER BY e.time_fired_ts DESC - LIMIT $2 - ` + LIMIT $%d + `, whereClause, argIndex) - rows, err := tx.QueryContext(txCtx, query, deviceID, limit) + rows, err := tx.QueryContext(txCtx, query, args...) 
if err != nil { return fmt.Errorf("failed to query events by device: %w", err) } @@ -67,10 +83,9 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li var slug sql.NullString var contextID []byte var triggerID sql.NullString - var allowNewEvent sql.NullBool var sharedData []byte - if err := rows.Scan(&e.EventID, &e.EventTypeID, &dataID, &slug, &contextID, &triggerID, &allowNewEvent, &e.TimeFiredTs, &e.EventType, &sharedData); err != nil { + if err := rows.Scan(&e.EventID, &e.EventTypeID, &dataID, &slug, &contextID, &triggerID, &e.TimeFiredTs, &e.EventType, &sharedData); err != nil { return err } @@ -86,9 +101,6 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li if triggerID.Valid { e.TriggerID = &triggerID.String } - if allowNewEvent.Valid { - e.AllowNewEvent = &allowNewEvent.Bool - } if len(sharedData) > 0 { e.SharedData = sharedData } @@ -171,7 +183,7 @@ func (c *Client) GetEventRules(ctx context.Context, deviceID string, page, pageS // Query rules query := ` SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, - er.status, er.is_active, er.start_time, er.end_time, er.created_at, er.updated_at + er.status, er.is_active, er.start_time, er.end_time, er.allow_new_event, er.created_at, er.updated_at FROM event_rules er ` + whereClause + ` ORDER BY er.created_at DESC LIMIT $` + fmt.Sprintf("%d", len(args)+1) + ` OFFSET $` + fmt.Sprintf("%d", len(args)+2) args = append(args, pageSize, offset) @@ -185,8 +197,8 @@ func (c *Client) GetEventRules(ctx context.Context, deviceID string, page, pageS for rows.Next() { var r models.EventRule if err := rows.Scan( - &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, - &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, + &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, + &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, ); err != nil { return 
err } @@ -220,7 +232,7 @@ func (c *Client) GetActiveRulesForDevice(ctx context.Context, deviceID string) ( // Filter by time range to exclude expired rules query := ` SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, - er.status, er.is_active, er.start_time, er.end_time, er.created_at, er.updated_at + er.status, er.is_active, er.start_time, er.end_time, er.allow_new_event, er.created_at, er.updated_at FROM event_rules er WHERE er.is_active = true AND er.device_id = $1 @@ -239,7 +251,7 @@ func (c *Client) GetActiveRulesForDevice(ctx context.Context, deviceID string) ( var r models.EventRule if err := rows.Scan( &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, - &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.CreatedAt, &r.UpdatedAt, + &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, ); err != nil { return err } @@ -290,11 +302,11 @@ func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleReque // Insert event rule err := tx.QueryRowContext(txCtx, ` - INSERT INTO event_rules (device_id, rule_key, operator, operand, status, is_active, start_time, end_time) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + INSERT INTO event_rules (device_id, rule_key, operator, operand, status, is_active, allow_new_event, start_time, end_time) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING event_rule_id, created_at, updated_at `, req.DeviceID, req.RuleKey, req.Operator, req.Operand, - req.Status, req.IsActive, startTime, endTime).Scan( + req.Status, req.IsActive, req.AllowNewEvent, startTime, endTime).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) @@ -351,11 +363,11 @@ func (c *Client) UpdateEventRule(ctx context.Context, ruleID string, req *models err := tx.QueryRowContext(txCtx, ` UPDATE event_rules SET device_id = $1, rule_key = $2, operator = $3, operand = $4, - status = $5, is_active = $6, start_time = $7, end_time = $8, updated_at = NOW() - 
WHERE event_rule_id = $9 + status = $5, is_active = $6, allow_new_event = $7, start_time = $8, end_time = $9, updated_at = NOW() + WHERE event_rule_id = $10 RETURNING event_rule_id, created_at, updated_at `, req.DeviceID, req.RuleKey, req.Operator, req.Operand, - req.Status, req.IsActive, startTime, endTime, ruleID).Scan( + req.Status, req.IsActive, req.AllowNewEvent, startTime, endTime, ruleID).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) diff --git a/internal/timescaledb/telemetry.go b/internal/timescaledb/telemetry.go index 0c53066..8c85a54 100644 --- a/internal/timescaledb/telemetry.go +++ b/internal/timescaledb/telemetry.go @@ -207,36 +207,3 @@ func nullUUID(id sql.NullString) any { } return nil } - -// UpdateDeviceTriggerEventType updates the trigger event type for an device -func (c *Client) UpdateDeviceTriggerEventType(ctx context.Context, deviceID, triggerEventType string) error { - if deviceID == "" { - return fmt.Errorf("device_id is required") - } - if triggerEventType == "" { - return fmt.Errorf("trigger_event_type is required") - } - - org := orgFromContext(ctx) - if org == "" { - return fmt.Errorf("organization not found in context") - } - - return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { - _, err := tx.ExecContext(txCtx, ` - UPDATE entities - SET trigger_event_type = $1, - updated_at = NOW() - WHERE id = $2 - `, triggerEventType, deviceID) - - if err != nil { - return fmt.Errorf("failed to update device trigger event type: %w", err) - } - - log.Printf("[Telemetry] Updated device trigger event type: org=%s, device_id=%s, trigger_event_type=%s", - org, deviceID, triggerEventType) - - return nil - }) -} diff --git a/pkgs/db/migrations/20251225000000_create_events_schema.sql b/pkgs/db/migrations/20251225000000_create_events_schema.sql index b6931ed..91465d9 100644 --- a/pkgs/db/migrations/20251225000000_create_events_schema.sql +++ b/pkgs/db/migrations/20251225000000_create_events_schema.sql @@ -35,6 
+35,7 @@ CREATE TABLE IF NOT EXISTS event_rules ( is_active BOOLEAN DEFAULT true, start_time TIMESTAMPTZ, end_time TIMESTAMPTZ, + allow_new_event BOOLEAN DEFAULT true, created_at TIMESTAMPTZ NOT NULL DEFAULT now(), updated_at TIMESTAMPTZ NOT NULL DEFAULT now() ); @@ -60,11 +61,9 @@ CREATE TABLE IF NOT EXISTS events ( event_rule_id UUID REFERENCES event_rules(event_rule_id) ON DELETE SET NULL, space_slug TEXT, entity_id TEXT, - device_model_id TEXT, state_id UUID REFERENCES entity_states(id) ON DELETE SET NULL, context_id_bin BYTEA, trigger_id UUID, -- for future automations table reference - allow_new_event BOOLEAN DEFAULT true, time_fired_ts BIGINT NOT NULL, created_at TIMESTAMPTZ NOT NULL DEFAULT now() ); @@ -73,7 +72,6 @@ CREATE INDEX IF NOT EXISTS idx_events_event_type_id ON events (event_type_id); CREATE INDEX IF NOT EXISTS idx_events_event_rule_id ON events (event_rule_id); CREATE INDEX IF NOT EXISTS idx_events_space_slug ON events (space_slug); CREATE INDEX IF NOT EXISTS idx_events_entity_id ON events (entity_id); -CREATE INDEX IF NOT EXISTS idx_events_device_model_id ON events (device_model_id); CREATE INDEX IF NOT EXISTS idx_events_state_id ON events (state_id); CREATE INDEX IF NOT EXISTS idx_events_trigger_id ON events (trigger_id); CREATE INDEX IF NOT EXISTS idx_events_time_fired_ts ON events (time_fired_ts DESC); From c8bf5fa44b897b67ea56148d82d671fe582ce92c Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Tue, 3 Feb 2026 16:18:32 +0700 Subject: [PATCH 08/10] chore: remove unnecessary validations --- internal/config/event_rules.go | 1 - internal/events/evaluator/evaluator.go | 10 ---------- 2 files changed, 11 deletions(-) diff --git a/internal/config/event_rules.go b/internal/config/event_rules.go index a272ace..43af323 100644 --- a/internal/config/event_rules.go +++ b/internal/config/event_rules.go @@ -12,7 +12,6 @@ import ( // EventRuleConfig represents a single event rule configuration type EventRuleConfig struct { RuleKey string 
`yaml:"rule_key"` - EntityIDPattern string `yaml:"entity_id_pattern"` Operator string `yaml:"operator"` Operand string `yaml:"operand"` EventType string `yaml:"event_type"` diff --git a/internal/events/evaluator/evaluator.go b/internal/events/evaluator/evaluator.go index 956d995..73d95f8 100644 --- a/internal/events/evaluator/evaluator.go +++ b/internal/events/evaluator/evaluator.go @@ -35,13 +35,6 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEn return nil } - // Check if rule applies to this entity - if rule.EntityIDPattern != "" { - if !events.Contains(entity.EntityID, rule.EntityIDPattern) && entity.EntityID != rule.EntityIDPattern { - return nil - } - } - // Get the value from entity attributes based on rule_key value, exists := e.getEntityValue(entity, rule.RuleKey) if !exists { @@ -86,9 +79,6 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr if rule.IsActive != nil && !*rule.IsActive { return nil } - if rule.Status != nil && *rule.Status != "active" { - return nil - } // Check if rule allows creating new events if rule.AllowNewEvent != nil && !*rule.AllowNewEvent { From a05c46a2239cfaaf3cc3a065bd6d2554d520a348 Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Tue, 3 Feb 2026 21:41:13 +0700 Subject: [PATCH 09/10] refactor: use device id from the transformer --- configs/event_rules/rakwireless/rak4630.yaml | 4 ++-- internal/events/evaluator/evaluator.go | 8 ++++---- internal/events/registry/registry.go | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/configs/event_rules/rakwireless/rak4630.yaml b/configs/event_rules/rakwireless/rak4630.yaml index ebc881d..834408c 100644 --- a/configs/event_rules/rakwireless/rak4630.yaml +++ b/configs/event_rules/rakwireless/rak4630.yaml @@ -6,7 +6,7 @@ display_name: "RAK4630 Event Rules" rules: # Battery Low Warning - - rule_key: "battery_v" + - rule_key: "battery" operator: "lt" operand: "3.3" event_type: "device_event" @@ 
-16,7 +16,7 @@ rules: is_active: true # Battery Critical Warning - - rule_key: "battery_v" + - rule_key: "battery" operator: "lt" operand: "3.0" event_type: "device_event" diff --git a/internal/events/evaluator/evaluator.go b/internal/events/evaluator/evaluator.go index 73d95f8..5ffb0fa 100644 --- a/internal/events/evaluator/evaluator.go +++ b/internal/events/evaluator/evaluator.go @@ -24,7 +24,7 @@ func NewEvaluator(logger *zap.Logger) *Evaluator { } // EvaluateRule evaluates a single YAML rule against an entity -func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEntity) *models.MatchedEvent { +func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, deviceID string, entity models.TelemetryEntity) *models.MatchedEvent { // Skip inactive rules if !rule.IsActive { return nil @@ -57,7 +57,7 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEn } matchedEvent := &models.MatchedEvent{ - EntityID: entity.EntityID, + EntityID: deviceID, EntityType: entity.EntityType, RuleKey: rule.RuleKey, EventType: rule.EventType, @@ -74,7 +74,7 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, entity models.TelemetryEn } // EvaluateRuleDB evaluates a database rule against an entity -func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.TelemetryEntity) *models.MatchedEvent { +func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, deviceID string, entity models.TelemetryEntity) *models.MatchedEvent { // Skip inactive rules if rule.IsActive != nil && !*rule.IsActive { return nil @@ -128,7 +128,7 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, entity models.Telemetr description := fmt.Sprintf("Rule %s matched: %.2f %s %.2f", ruleKey, value, operator, operand) matchedEvent := &models.MatchedEvent{ - EntityID: entity.EntityID, + EntityID: deviceID, EntityType: entity.EntityType, RuleKey: ruleKey, EventType: "device_event", diff --git a/internal/events/registry/registry.go 
b/internal/events/registry/registry.go index 364739d..1fa571b 100644 --- a/internal/events/registry/registry.go +++ b/internal/events/registry/registry.go @@ -108,7 +108,7 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri // Find rules that match this attribute key if rules, exists := rulesByKey[attrKey]; exists { for _, rule := range rules { - if matched := r.evaluator.EvaluateRuleDB(rule, entity); matched != nil { + if matched := r.evaluator.EvaluateRuleDB(rule, deviceID, entity); matched != nil { matchedEvents = append(matchedEvents, *matched) matchedRuleKeys[matched.RuleKey] = true } @@ -123,7 +123,7 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri if !processedKeys[entity.EntityType] { processedKeys[entity.EntityType] = true for _, rule := range rules { - if matched := r.evaluator.EvaluateRuleDB(rule, entity); matched != nil { + if matched := r.evaluator.EvaluateRuleDB(rule, deviceID, entity); matched != nil { matchedEvents = append(matchedEvents, *matched) matchedRuleKeys[matched.RuleKey] = true } @@ -164,7 +164,7 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri // Find default rules for this attribute if rules, exists := defaultRulesByKey[attrKey]; exists { for _, rule := range rules { - if matched := r.evaluator.EvaluateRule(rule, entity); matched != nil { + if matched := r.evaluator.EvaluateRule(rule, deviceID, entity); matched != nil { matchedEvents = append(matchedEvents, *matched) } } @@ -178,7 +178,7 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri if !matchedRuleKeys[entity.EntityType] { if rules, exists := defaultRulesByKey[entity.EntityType]; exists { for _, rule := range rules { - if matched := r.evaluator.EvaluateRule(rule, entity); matched != nil { + if matched := r.evaluator.EvaluateRule(rule, deviceID, entity); matched != nil { matchedEvents = append(matchedEvents, *matched) } } From 
a80a7794a5408e8cb4bdb72d182947feda87b9ae Mon Sep 17 00:00:00 2001 From: lethanhdat762003 Date: Wed, 4 Feb 2026 14:08:01 +0700 Subject: [PATCH 10/10] refactor: removed unnecessary logics. optimized query for evaluating. removed debug logs --- internal/api/events/handler.go | 8 -- internal/events/evaluator/evaluator.go | 73 +++-------- internal/events/registry/cache.go | 60 ++++++++- internal/events/registry/registry.go | 118 +++++------------- internal/models/events.go | 9 +- internal/models/telemetry.go | 1 + internal/services/processor.go | 1 - internal/timescaledb/events.go | 81 ++++++------ internal/timescaledb/telemetry.go | 25 ++-- .../20251225000000_create_events_schema.sql | 5 +- 10 files changed, 167 insertions(+), 214 deletions(-) diff --git a/internal/api/events/handler.go b/internal/api/events/handler.go index b46acbd..af8c560 100644 --- a/internal/api/events/handler.go +++ b/internal/api/events/handler.go @@ -12,10 +12,6 @@ import ( "go.uber.org/zap" ) -// ============================================================================ -// Events API Handlers -// ============================================================================ - // getEventsByDevice returns all events for a specific device func getEventsByDevice(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { return func(c echo.Context) error { @@ -72,10 +68,6 @@ func getEventsByDevice(logger *zap.Logger, tsClient *timescaledb.Client) echo.Ha } } -// ============================================================================ -// Event Rules API Handlers -// ============================================================================ - // getEventRules returns all event rules func getEventRules(logger *zap.Logger, tsClient *timescaledb.Client) echo.HandlerFunc { return func(c echo.Context) error { diff --git a/internal/events/evaluator/evaluator.go b/internal/events/evaluator/evaluator.go index 5ffb0fa..41ea694 100644 --- a/internal/events/evaluator/evaluator.go +++ 
b/internal/events/evaluator/evaluator.go @@ -66,8 +66,8 @@ func (e *Evaluator) EvaluateRule(rule loader.YAMLRule, deviceID string, entity m Value: value, Threshold: operand, Operator: rule.Operator, - RuleSource: "default", Timestamp: time.Now().UnixMilli(), + StateID: entity.StateID, } return matchedEvent @@ -94,7 +94,7 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, deviceID string, entit return nil } - // Get the value from entity attributes based on rule_key + // Get the value from entity based on rule_key value, exists := e.getEntityValue(entity, ruleKey) if !exists { return nil @@ -137,35 +137,29 @@ func (e *Evaluator) EvaluateRuleDB(rule models.EventRule, deviceID string, entit Value: value, Threshold: operand, Operator: operator, - RuleSource: "automation", Timestamp: time.Now().UnixMilli(), + EventRuleID: &rule.EventRuleID, + StateID: entity.StateID, } return matchedEvent } -// getEntityValue extracts a numeric value from an entity based on the rule key +// getEntityValue extracts a numeric value from an entity state based on the rule key func (e *Evaluator) getEntityValue(entity models.TelemetryEntity, ruleKey string) (float64, bool) { - // Try to get value from Attributes map first - if entity.Attributes != nil { - if val, ok := entity.Attributes[ruleKey]; ok { + if entity.State == nil { + return 0, false + } + + switch s := entity.State.(type) { + case map[string]interface{}: + if val, ok := s[ruleKey]; ok { return events.ParseFloat64(val) } - } - - // Try to get value from State - if entity.State != nil { - switch s := entity.State.(type) { - case map[string]interface{}: - if val, ok := s[ruleKey]; ok { - return events.ParseFloat64(val) - } - default: - // For other types, try to parse as float64 - if val, ok := events.ParseFloat64(s); ok { - if e.isRuleKeyRelevant(ruleKey, entity.EntityType, entity.Name) { - return val, true - } + default: + if val, ok := events.ParseFloat64(s); ok { + if e.isRuleKeyMatched(ruleKey, entity.EntityType) { + 
return val, true } } } @@ -173,38 +167,9 @@ func (e *Evaluator) getEntityValue(entity models.TelemetryEntity, ruleKey string return 0, false } -// isRuleKeyRelevant checks if a rule key is relevant to an entity -func (e *Evaluator) isRuleKeyRelevant(ruleKey, entityType, entityName string) bool { - if ruleKey == entityType { - return true - } - - if strings.HasPrefix(ruleKey, entityType) { - return true - } - - if strings.HasPrefix(entityType, ruleKey) { - return true - } - - // Check entity name for common sensor patterns - entityNameLower := strings.ToLower(entityName) +// isRuleKeyMatched checks if a rule key matches an entity type +func (e *Evaluator) isRuleKeyMatched(ruleKey, entityType string) bool { ruleKeyLower := strings.ToLower(ruleKey) - // Direct match: "temperature" rule_key with "Temperature" entity name - if ruleKeyLower == entityNameLower { - return true - } - - // Contains match: "battery_v" rule_key with "Battery Level" entity - if strings.Contains(entityNameLower, ruleKeyLower) { - return true - } - - // Prefix match: "humidity" rule_key with "Humidity Sensor" entity - if strings.HasPrefix(entityNameLower, ruleKeyLower) { - return true - } - - return false + return ruleKeyLower == strings.ToLower(entityType) } diff --git a/internal/events/registry/cache.go b/internal/events/registry/cache.go index 16dcc70..7bd006f 100644 --- a/internal/events/registry/cache.go +++ b/internal/events/registry/cache.go @@ -20,9 +20,10 @@ const ( // DeviceRulesCacheEntry represents a cached entry for device automation rules type DeviceRulesCacheEntry struct { - Rules []models.EventRule - CachedAt time.Time - ExpiresAt time.Time + Rules []models.EventRule // Flat array (for compatibility) + RulesByKey map[string][]models.EventRule // Grouped by rule_key for O(1) lookup + CachedAt time.Time + ExpiresAt time.Time } // DeviceRulesCache manages caching of device automation rules @@ -110,11 +111,15 @@ func (c *DeviceRulesCache) Get(ctx context.Context, deviceID string) 
[]models.Ev return nil } + // Group rules by rule_key for O(1) lookup + rulesByKey := c.groupRulesByKey(rules) + // Store in cache (write lock) entry = &DeviceRulesCacheEntry{ - Rules: rules, - CachedAt: now, - ExpiresAt: now.Add(c.ttl), + Rules: rules, + RulesByKey: rulesByKey, + CachedAt: now, + ExpiresAt: now.Add(c.ttl), } c.mu.Lock() @@ -129,6 +134,49 @@ func (c *DeviceRulesCache) Get(ctx context.Context, deviceID string) []models.Ev return rules } +// GetGrouped retrieves grouped device automation rules from cache or database +func (c *DeviceRulesCache) GetGrouped(ctx context.Context, deviceID string) map[string][]models.EventRule { + now := time.Now() + + // Try cache first (read lock) + c.mu.RLock() + entry, found := c.cache[deviceID] + c.mu.RUnlock() + + if found && now.Before(entry.ExpiresAt) { + // Cache hit + c.hits.Add(1) + return entry.RulesByKey + } + + // Cache miss - call Get() which will populate the cache + c.Get(ctx, deviceID) + + // Try cache again + c.mu.RLock() + entry, found = c.cache[deviceID] + c.mu.RUnlock() + + if found && now.Before(entry.ExpiresAt) { + return entry.RulesByKey + } + + return nil +} + +// groupRulesByKey groups rules by their rule_key for O(1) lookup +func (c *DeviceRulesCache) groupRulesByKey(rules []models.EventRule) map[string][]models.EventRule { + result := make(map[string][]models.EventRule) + + for _, rule := range rules { + if rule.RuleKey != nil && *rule.RuleKey != "" { + result[*rule.RuleKey] = append(result[*rule.RuleKey], rule) + } + } + + return result +} + // Invalidate removes cached rules for a specific device // Call this when automation rules are created, updated, or deleted func (c *DeviceRulesCache) Invalidate(deviceID string) { diff --git a/internal/events/registry/registry.go b/internal/events/registry/registry.go index 1fa571b..d2635e4 100644 --- a/internal/events/registry/registry.go +++ b/internal/events/registry/registry.go @@ -16,8 +16,9 @@ import ( // RuleRegistry manages event rules from both 
YAML files and database type RuleRegistry struct { // Default rules from YAML (key: "brand/model" e.g., "rakwireless/rak4630") - defaultRules map[string]*loader.DeviceModelRules - defaultRulesMu sync.RWMutex + defaultRules map[string]*loader.DeviceModelRules + groupedDefaultRules map[string]map[string][]loader.YAMLRule // "brand/model" → rule_key → rules + defaultRulesMu sync.RWMutex // Cache for device automation rules cache *DeviceRulesCache @@ -30,11 +31,12 @@ type RuleRegistry struct { // NewRuleRegistry creates a new rule registry func NewRuleRegistry(db *timescaledb.Client, logger *zap.Logger) *RuleRegistry { r := &RuleRegistry{ - defaultRules: make(map[string]*loader.DeviceModelRules), - cache: NewDeviceRulesCache(db, logger), - evaluator: evaluator.NewEvaluator(logger), - db: db, - logger: logger, + defaultRules: make(map[string]*loader.DeviceModelRules), + groupedDefaultRules: make(map[string]map[string][]loader.YAMLRule), + cache: NewDeviceRulesCache(db, logger), + evaluator: evaluator.NewEvaluator(logger), + db: db, + logger: logger, } // Start background cache cleanup @@ -55,6 +57,16 @@ func (r *RuleRegistry) LoadDefaultRulesFromDir(dir string) error { r.defaultRules = rules + // Group default rules by rule_key for O(1) lookup + r.groupedDefaultRules = make(map[string]map[string][]loader.YAMLRule) + for key, dm := range rules { + grouped := make(map[string][]loader.YAMLRule) + for _, rule := range dm.Rules { + grouped[rule.RuleKey] = append(grouped[rule.RuleKey], rule) + } + r.groupedDefaultRules[key] = grouped + } + // Log loaded rules for _, dm := range rules { r.logger.Info("Loaded default event rules", @@ -79,54 +91,24 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri return matchedEvents } - // Try to get automation rules from cache first - customRules := r.cache.Get(ctx, deviceID) + // Try to get grouped automation rules from cache first + rulesByKey := r.cache.GetGrouped(ctx, deviceID) // Evaluate custom 
automation rules if they exist - if len(customRules) > 0 { + if len(rulesByKey) > 0 { r.logger.Debug("Using custom automation rules for device", zap.String("device_id", deviceID), - zap.Int("rule_count", len(customRules))) + zap.Int("rule_count", len(rulesByKey))) - // Group rules by rule_key for O(1) lookup - rulesByKey := r.groupRulesByKey(customRules) - - // Match entity attributes to rules by rule_key + // Match entity to rules by rule_key for _, entity := range entities { - // Track which rule_keys we've processed for this entity - processedKeys := make(map[string]bool) - - // Check each attribute in the entity - if entity.Attributes != nil { - for attrKey := range entity.Attributes { - // Skip if we already processed a rule with this key - if processedKeys[attrKey] { - continue - } - processedKeys[attrKey] = true - - // Find rules that match this attribute key - if rules, exists := rulesByKey[attrKey]; exists { - for _, rule := range rules { - if matched := r.evaluator.EvaluateRuleDB(rule, deviceID, entity); matched != nil { - matchedEvents = append(matchedEvents, *matched) - matchedRuleKeys[matched.RuleKey] = true - } - } - } - } - } - - // Also check entity_type for state-based entities + // Check entity_type for state-based entities if entity.EntityType != "" { if rules, exists := rulesByKey[entity.EntityType]; exists { - if !processedKeys[entity.EntityType] { - processedKeys[entity.EntityType] = true - for _, rule := range rules { - if matched := r.evaluator.EvaluateRuleDB(rule, deviceID, entity); matched != nil { - matchedEvents = append(matchedEvents, *matched) - matchedRuleKeys[matched.RuleKey] = true - } + for _, rule := range rules { + if matched := r.evaluator.EvaluateRuleDB(rule, deviceID, entity); matched != nil { + matchedEvents = append(matchedEvents, *matched) + matchedRuleKeys[matched.RuleKey] = true } } } @@ -138,40 +120,11 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri // Only for rule_keys that didn't 
match custom automation rules r.defaultRulesMu.RLock() key := fmt.Sprintf("%s/%s", strings.ToLower(brand), strings.ToLower(model)) - defaultRules, exists := r.defaultRules[key] + defaultRulesByKey, exists := r.groupedDefaultRules[key] r.defaultRulesMu.RUnlock() if exists { - // Group default rules by rule_key - defaultRulesByKey := make(map[string][]loader.YAMLRule) - for _, rule := range defaultRules.Rules { - defaultRulesByKey[rule.RuleKey] = append(defaultRulesByKey[rule.RuleKey], rule) - } - for _, entity := range entities { - // Track processed keys to avoid duplicate evaluations - processedKeys := make(map[string]bool) - - // Check entity attributes - if entity.Attributes != nil { - for attrKey := range entity.Attributes { - // Skip if custom automation rule already matched for this rule_key - if matchedRuleKeys[attrKey] { - continue - } - processedKeys[attrKey] = true - - // Find default rules for this attribute - if rules, exists := defaultRulesByKey[attrKey]; exists { - for _, rule := range rules { - if matched := r.evaluator.EvaluateRule(rule, deviceID, entity); matched != nil { - matchedEvents = append(matchedEvents, *matched) - } - } - } - } - } - // Check entity_type for state-based entities if entity.EntityType != "" { // Skip if custom automation rule already matched for this rule_key @@ -191,17 +144,6 @@ func (r *RuleRegistry) Evaluate(ctx context.Context, deviceID, brand, model stri return matchedEvents } -// groupRulesByKey groups database rules by rule_key for efficient O(1) lookup -func (r *RuleRegistry) groupRulesByKey(rules []models.EventRule) map[string][]models.EventRule { - rulesByKey := make(map[string][]models.EventRule) - for _, rule := range rules { - if rule.RuleKey != nil && *rule.RuleKey != "" { - rulesByKey[*rule.RuleKey] = append(rulesByKey[*rule.RuleKey], rule) - } - } - return rulesByKey -} - // GetDefaultRules returns all loaded default rules (for debugging/inspection) func (r *RuleRegistry) GetDefaultRules() 
map[string]*loader.DeviceModelRules { r.defaultRulesMu.RLock() diff --git a/internal/models/events.go b/internal/models/events.go index 7bc4125..26cb977 100644 --- a/internal/models/events.go +++ b/internal/models/events.go @@ -25,9 +25,8 @@ type EventRule struct { EventRuleID string `json:"event_rule_id" db:"event_rule_id"` DeviceID *string `json:"device_id,omitempty" db:"device_id"` RuleKey *string `json:"rule_key,omitempty" db:"rule_key"` // e.g., 'battery_low', 'temperature_low' - Operator *string `json:"operator,omitempty" db:"operator"` // eq, ne, gt, lt, gte, lte, contains + Operator *string `json:"operator,omitempty" db:"operator"` // eq, ne, gt, lt, gte, lte,... Operand string `json:"operand" db:"operand"` - Status *string `json:"status,omitempty" db:"status"` // active, inactive, paused IsActive *bool `json:"is_active,omitempty" db:"is_active"` StartTime *time.Time `json:"start_time,omitempty" db:"start_time"` EndTime *time.Time `json:"end_time,omitempty" db:"end_time"` @@ -42,7 +41,6 @@ type EventRuleRequest struct { RuleKey *string `json:"rule_key,omitempty" validate:"required"` Operator *string `json:"operator,omitempty" validate:"omitempty,oneof=eq ne gt lt gte lte contains"` Operand string `json:"operand" validate:"required"` - Status *string `json:"status,omitempty" validate:"omitempty,oneof=active inactive paused"` IsActive *bool `json:"is_active,omitempty"` AllowNewEvent *bool `json:"allow_new_event,omitempty"` StartTime *string `json:"start_time,omitempty" validate:"omitempty,datetime=2006-01-02T15:04:05Z07:00"` @@ -56,7 +54,6 @@ type EventRuleResponse struct { RuleKey *string `json:"rule_key,omitempty"` Operator *string `json:"operator,omitempty"` Operand string `json:"operand"` - Status *string `json:"status,omitempty"` IsActive *bool `json:"is_active,omitempty"` StartTime *time.Time `json:"start_time,omitempty"` EndTime *time.Time `json:"end_time,omitempty"` @@ -82,7 +79,6 @@ type Event struct { SpaceSlug string `json:"space_slug,omitempty" 
db:"space_slug"` EntityID *string `json:"entity_id,omitempty" db:"entity_id"` StateID *int64 `json:"state_id,omitempty" db:"state_id"` - ContextID []byte `json:"context_id_bin,omitempty" db:"context_id_bin"` TriggerID *string `json:"trigger_id,omitempty" db:"trigger_id"` TimeFiredTs int64 `json:"time_fired_ts" db:"time_fired_ts"` CreatedAt time.Time `json:"created_at" db:"created_at"` @@ -259,6 +255,7 @@ type MatchedEvent struct { Value float64 `json:"value"` Threshold float64 `json:"threshold"` Operator string `json:"operator"` - RuleSource string `json:"rule_source"` // "default" or "automation" Timestamp int64 `json:"timestamp"` // Unix timestamp in milliseconds + EventRuleID *string `json:"event_rule_id,omitempty"` + StateID *string `json:"state_id,omitempty"` } diff --git a/internal/models/telemetry.go b/internal/models/telemetry.go index 308c8eb..9a4e021 100644 --- a/internal/models/telemetry.go +++ b/internal/models/telemetry.go @@ -35,4 +35,5 @@ type TelemetryEntity struct { UnitOfMeas string `json:"unit_of_measurement,omitempty"` Icon string `json:"icon,omitempty"` Timestamp string `json:"timestamp"` + StateID *string `json:"state_id,omitempty"` } diff --git a/internal/services/processor.go b/internal/services/processor.go index 652cbb2..5af3a64 100644 --- a/internal/services/processor.go +++ b/internal/services/processor.go @@ -174,7 +174,6 @@ func (p *LocationProcessor) ProcessTelemetry(ctx context.Context, payload *model zap.String("entity_id", event.EntityID), zap.String("rule_key", event.RuleKey), zap.String("event_type", event.EventType), - zap.String("rule_source", event.RuleSource), zap.String("event_level", event.EventLevel), zap.Float64("value", event.Value), zap.Float64("threshold", event.Threshold)) diff --git a/internal/timescaledb/events.go b/internal/timescaledb/events.go index 758d0b4..eeb02c3 100644 --- a/internal/timescaledb/events.go +++ b/internal/timescaledb/events.go @@ -3,7 +3,9 @@ package timescaledb import ( "context" 
"database/sql" + "encoding/json" "fmt" + "hash/crc32" "time" "github.com/Space-DF/telemetry-service/internal/models" @@ -59,7 +61,7 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li // Complete the query query := fmt.Sprintf(` - SELECT e.event_id, e.event_type_id, e.data_id, e.space_slug, e.context_id_bin, + SELECT e.event_id, e.event_type_id, e.data_id, e.space_slug, e.trigger_id, e.time_fired_ts, et.event_type, ed.shared_data FROM events e JOIN event_types et ON e.event_type_id = et.event_type_id @@ -95,9 +97,6 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li if slug.Valid { e.SpaceSlug = slug.String } - if len(contextID) > 0 { - e.ContextID = contextID - } if triggerID.Valid { e.TriggerID = &triggerID.String } @@ -118,10 +117,6 @@ func (c *Client) GetEventsByDevice(ctx context.Context, org, deviceID string, li return events, nil } -// ============================================================================ -// Event Rules -// ============================================================================ - // populateEventRuleResponse populates an EventRuleResponse from request data and times func populateEventRuleResponse(result *models.EventRuleResponse, req *models.EventRuleRequest, startTime, endTime *time.Time) { if req.DeviceID != nil { @@ -134,9 +129,6 @@ func populateEventRuleResponse(result *models.EventRuleResponse, req *models.Eve result.Operator = req.Operator } result.Operand = req.Operand - if req.Status != nil { - result.Status = req.Status - } if req.IsActive != nil { result.IsActive = req.IsActive } @@ -183,7 +175,7 @@ func (c *Client) GetEventRules(ctx context.Context, deviceID string, page, pageS // Query rules query := ` SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, - er.status, er.is_active, er.start_time, er.end_time, er.allow_new_event, er.created_at, er.updated_at + er.is_active, er.start_time, er.end_time, er.allow_new_event, 
er.created_at, er.updated_at FROM event_rules er ` + whereClause + ` ORDER BY er.created_at DESC LIMIT $` + fmt.Sprintf("%d", len(args)+1) + ` OFFSET $` + fmt.Sprintf("%d", len(args)+2) args = append(args, pageSize, offset) @@ -198,7 +190,7 @@ func (c *Client) GetEventRules(ctx context.Context, deviceID string, page, pageS var r models.EventRule if err := rows.Scan( &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, - &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, + &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, ); err != nil { return err } @@ -232,7 +224,7 @@ func (c *Client) GetActiveRulesForDevice(ctx context.Context, deviceID string) ( // Filter by time range to exclude expired rules query := ` SELECT er.event_rule_id, er.device_id, er.rule_key, er.operator, er.operand, - er.status, er.is_active, er.start_time, er.end_time, er.allow_new_event, er.created_at, er.updated_at + er.is_active, er.start_time, er.end_time, er.allow_new_event, er.created_at, er.updated_at FROM event_rules er WHERE er.is_active = true AND er.device_id = $1 @@ -251,7 +243,7 @@ func (c *Client) GetActiveRulesForDevice(ctx context.Context, deviceID string) ( var r models.EventRule if err := rows.Scan( &r.EventRuleID, &r.DeviceID, &r.RuleKey, &r.Operator, &r.Operand, - &r.Status, &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, + &r.IsActive, &r.StartTime, &r.EndTime, &r.AllowNewEvent, &r.CreatedAt, &r.UpdatedAt, ); err != nil { return err } @@ -302,11 +294,11 @@ func (c *Client) CreateEventRule(ctx context.Context, req *models.EventRuleReque // Insert event rule err := tx.QueryRowContext(txCtx, ` - INSERT INTO event_rules (device_id, rule_key, operator, operand, status, is_active, allow_new_event, start_time, end_time) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + INSERT INTO event_rules (device_id, rule_key, operator, operand, is_active, 
allow_new_event, start_time, end_time) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING event_rule_id, created_at, updated_at `, req.DeviceID, req.RuleKey, req.Operator, req.Operand, - req.Status, req.IsActive, req.AllowNewEvent, startTime, endTime).Scan( + req.IsActive, req.AllowNewEvent, startTime, endTime).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) @@ -363,11 +355,11 @@ func (c *Client) UpdateEventRule(ctx context.Context, ruleID string, req *models err := tx.QueryRowContext(txCtx, ` UPDATE event_rules SET device_id = $1, rule_key = $2, operator = $3, operand = $4, - status = $5, is_active = $6, allow_new_event = $7, start_time = $8, end_time = $9, updated_at = NOW() - WHERE event_rule_id = $10 + is_active = $5, allow_new_event = $6, start_time = $7, end_time = $8, updated_at = NOW() + WHERE event_rule_id = $9 RETURNING event_rule_id, created_at, updated_at `, req.DeviceID, req.RuleKey, req.Operator, req.Operand, - req.Status, req.IsActive, req.AllowNewEvent, startTime, endTime, ruleID).Scan( + req.IsActive, req.AllowNewEvent, startTime, endTime, ruleID).Scan( &result.EventRuleID, &result.CreatedAt, &result.UpdatedAt, ) @@ -422,34 +414,53 @@ func (c *Client) CreateEvent(ctx context.Context, org string, event *models.Matc } return c.WithOrgTx(ctx, org, func(txCtx context.Context, tx bob.Tx) error { - // Step 1: Get or create event_type + // Get event_type var eventTypeID int err := tx.QueryRowContext(txCtx, ` SELECT event_type_id FROM event_types WHERE event_type = $1 `, event.EventType).Scan(&eventTypeID) - if err == sql.ErrNoRows { - // Create new event_type - err = tx.QueryRowContext(txCtx, ` - INSERT INTO event_types (event_type) VALUES ($1) - RETURNING event_type_id - `, event.EventType).Scan(&eventTypeID) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("event type '%s' does not exist", event.EventType) + } + return fmt.Errorf("failed to get event_type: %w", err) + } + + // Create event_data with event details + 
dataID := sql.NullInt64{Valid: false} + eventData := map[string]interface{}{ + "description": event.Description, + "value": event.Value, + "threshold": event.Threshold, + "operator": event.Operator, + "rule_key": event.RuleKey, } + rawData, err := json.Marshal(eventData) if err != nil { - return fmt.Errorf("failed to get/create event_type: %w", err) + return fmt.Errorf("failed to marshal event data: %w", err) } - // Step 2: Create event_data with the event information - dataID := sql.NullInt64{Valid: false} + hash := int64(crc32.ChecksumIEEE(rawData)) + err = tx.QueryRowContext(txCtx, ` + INSERT INTO event_data (hash, shared_data) + VALUES ($1, $2) + ON CONFLICT (hash) DO UPDATE SET shared_data = EXCLUDED.shared_data + RETURNING data_id + `, hash, rawData).Scan(&dataID) + + if err != nil { + return fmt.Errorf("failed to create event_data: %w", err) + } - // Step 3: Create the event + // Create the event _, err = tx.ExecContext(txCtx, ` INSERT INTO events ( event_type_id, data_id, event_level, event_rule_id, - space_slug, entity_id, time_fired_ts - ) VALUES ($1, $2, $3, $4, $5, $6, $7) - `, eventTypeID, dataID, event.EventLevel, nil, spaceSlug, event.EntityID, event.Timestamp) + space_slug, entity_id, state_id, time_fired_ts + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + `, eventTypeID, dataID, event.EventLevel, event.EventRuleID, spaceSlug, event.EntityID, event.StateID, event.Timestamp) if err != nil { return fmt.Errorf("failed to create event: %w", err) diff --git a/internal/timescaledb/telemetry.go b/internal/timescaledb/telemetry.go index 8c85a54..06cdba0 100644 --- a/internal/timescaledb/telemetry.go +++ b/internal/timescaledb/telemetry.go @@ -25,23 +25,22 @@ func (c *Client) SaveTelemetryPayload(ctx context.Context, payload *models.Telem return fmt.Errorf("missing organization in telemetry payload") } - log.Printf("[Telemetry] SaveTelemetryPayload: org=%s, device_id=%s, entities=%d", org, payload.DeviceID, len(payload.Entities)) return c.WithOrgTx(ctx, org, 
func(txCtx context.Context, tx bob.Tx) error { - for _, ent := range payload.Entities { - if err := c.upsertTelemetryEntity(txCtx, tx, &ent, payload); err != nil { + for i := range payload.Entities { + stateID, err := c.upsertTelemetryEntity(txCtx, tx, &payload.Entities[i], payload) + if err != nil { log.Printf("[Telemetry] ERROR upserting entity: %v", err) return err } - log.Printf("[Telemetry] Entity upserted: org=%s, device_id=%s, entity_id=%s", org, payload.DeviceID, ent.UniqueID) + payload.Entities[i].StateID = &stateID } - log.Printf("[Telemetry] Successfully saved payload: org=%s, device_id=%s", org, payload.DeviceID) return nil }) } -func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *models.TelemetryEntity, payload *models.TelemetryPayload) error { +func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *models.TelemetryEntity, payload *models.TelemetryPayload) (string, error) { if ent == nil { - return fmt.Errorf("nil telemetry entity") + return "", fmt.Errorf("nil telemetry entity") } displayType := ent.DisplayType @@ -65,7 +64,7 @@ func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *mode ent.EntityType, entityTypeKey, ).Scan(&entityTypeID); err != nil { - return fmt.Errorf("upsert entity_type '%s': %w", entityTypeKey, err) + return "", fmt.Errorf("upsert entity_type '%s': %w", entityTypeKey, err) } // Prepare optional device_id. @@ -104,7 +103,7 @@ func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *mode ent.UnitOfMeas, pq.Array(displayType), ).Scan(&entityID); err != nil { - return fmt.Errorf("upsert entity '%s': %w", ent.UniqueID, err) + return "", fmt.Errorf("upsert entity '%s': %w", ent.UniqueID, err) } // Handle attributes: deduplicate by hash to reuse existing row. 
@@ -112,7 +111,7 @@ func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *mode if len(ent.Attributes) > 0 { rawAttrs, err := json.Marshal(ent.Attributes) if err != nil { - return fmt.Errorf("marshal attributes for '%s': %w", ent.UniqueID, err) + return "", fmt.Errorf("marshal attributes for '%s': %w", ent.UniqueID, err) } hash := int64(crc32.ChecksumIEEE(rawAttrs)) @@ -125,7 +124,7 @@ func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *mode hash, rawAttrs, ).Scan(&attrsID); err != nil { - return fmt.Errorf("upsert attributes for '%s': %w", ent.UniqueID, err) + return "", fmt.Errorf("upsert attributes for '%s': %w", ent.UniqueID, err) } } @@ -184,10 +183,10 @@ func (c *Client) upsertTelemetryEntity(ctx context.Context, tx bob.Tx, ent *mode changedAt, ) if err != nil { - return fmt.Errorf("insert entity_state for '%s': %w", ent.UniqueID, err) + return "", fmt.Errorf("insert entity_state for '%s': %w", ent.UniqueID, err) } - return nil + return stateID.String(), nil } func parseRFC3339(ts string) time.Time { diff --git a/pkgs/db/migrations/20251225000000_create_events_schema.sql b/pkgs/db/migrations/20251225000000_create_events_schema.sql index 91465d9..7b0cdfc 100644 --- a/pkgs/db/migrations/20251225000000_create_events_schema.sql +++ b/pkgs/db/migrations/20251225000000_create_events_schema.sql @@ -44,7 +44,7 @@ CREATE TABLE IF NOT EXISTS event_rules ( CREATE INDEX IF NOT EXISTS idx_event_rules_device_id ON event_rules (device_id); CREATE INDEX IF NOT EXISTS idx_event_rules_status ON event_rules (status); CREATE INDEX IF NOT EXISTS idx_event_rules_is_active ON event_rules (is_active); --- Composite index for active device rules query (performance optimization) +-- Composite index for active device rules query CREATE INDEX IF NOT EXISTS idx_event_rules_active_device ON event_rules (is_active, device_id, created_at DESC) WHERE is_active = true; CREATE INDEX IF NOT EXISTS idx_event_rules_time_range ON event_rules 
(start_time, end_time) @@ -62,8 +62,7 @@ CREATE TABLE IF NOT EXISTS events ( space_slug TEXT, entity_id TEXT, state_id UUID REFERENCES entity_states(id) ON DELETE SET NULL, - context_id_bin BYTEA, - trigger_id UUID, -- for future automations table reference + trigger_id UUID, -- reserved for future automation use time_fired_ts BIGINT NOT NULL, created_at TIMESTAMPTZ NOT NULL DEFAULT now() );